problem_id (string, lengths 18-22) | source (string, 1 class) | task_type (string, 1 class) | in_source_id (string, lengths 13-58) | prompt (string, lengths 1.1k-25.4k) | golden_diff (string, lengths 145-5.13k) | verification_info (string, lengths 582-39.1k) | num_tokens (int64, 271-4.1k) | num_tokens_diff (int64, 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_22169 | rasdani/github-patches | git_diff | holoviz__panel-2897 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
HoloViews Colorbar not styled for dark themed template
Panel: 0.12.4
The ColorBar is unreadable when using the `FastListTemplate` with the `dark` theme.
I would like this to work for an example I am contributing to the `scikit-image` docs.

```python
import holoviews as hv
import panel as pn
from skimage import data, filters
image = data.coins()
edges = filters.sobel(image)*256
bounds = (-1, -1, 1, 1)
after_img = hv.Image(edges, bounds=bounds).apply.opts(
cmap="binary_r", responsive=True, title="After", active_tools=["box_zoom"], tools=["hover"], colorbar=True
)
layout=pn.panel(after_img, height=800, aspect_ratio=1)
pn.template.FastListTemplate(
site="Panel and Scikit-Image",
main=[layout],
header_background="#292929",
theme="dark",
).servable()
```
## How do I fix this?
I would like to contribute a PR. I assume I would have to update the Bokeh Theme generation here https://github.com/holoviz/panel/blob/396ce2f37473a047e4721a1431e6795374686533/panel/template/fast/theme.py#L62.
I can now see that I need to change to


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/template/fast/theme.py`
Content:
```
1 """
2 Functionality for styling according to Fast.design
3 """
4 import pathlib
5 import param
6
7 from bokeh.themes import Theme as _BkTheme
8
9 from ..theme import DarkTheme, DefaultTheme
10
11 _ROOT = pathlib.Path(__file__).parent / "css"
12
13 COLLAPSED_SVG_ICON = """
14 <svg style="stroke: var(--accent-fill-rest);" width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg" slot="collapsed-icon">
15 <path d="M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z" stroke-linecap="round" stroke-linejoin="round"></path>
16 <path d="M9 5.44446V12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
17 <path d="M5.44446 9H12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
18 </svg>
19 """ # noqa
20
21 EXPANDED_SVG_ICON = """
22 <svg style="stroke: var(--accent-fill-rest);" width="18" height="18" viewBox="0 0 18 18" fill="none" xmlns="http://www.w3.org/2000/svg" slot="expanded-icon">
23 <path d="M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z" stroke-linecap="round" stroke-linejoin="round"></path>
24 <path d="M5.44446 9H12.5556" stroke-linecap="round" stroke-linejoin="round"></path>
25 </svg>
26 """ # noqa
27
28 FONT_URL = "//fonts.googleapis.com/css?family=Open+Sans"
29
30 class FastStyle(param.Parameterized):
31 """
32 The FastStyle class provides the different colors and icons used
33 to style the Fast Templates.
34 """
35
36 background_color = param.String(default="#ffffff")
37 neutral_color = param.String(default="#000000")
38 accent_base_color = param.String(default="#A01346")
39 collapsed_icon = param.String(default=COLLAPSED_SVG_ICON)
40 expanded_icon = param.String(default=EXPANDED_SVG_ICON)
41 color = param.String(default="#00aa41")
42 neutral_fill_card_rest = param.String(default="#F7F7F7")
43 neutral_focus = param.String(default="#888888")
44 neutral_foreground_rest = param.String(default="#2B2B2B")
45
46 header_background = param.String(default="#00aa41")
47 header_neutral_color = param.String(default="#ffffff")
48 header_accent_base_color = param.String(default="#ffffff")
49 header_color = param.String(default="#ffffff")
50 font = param.String(default="Open Sans, sans-serif")
51 font_url = param.String(default=FONT_URL)
52 corner_radius = param.Integer(default=3)
53 shadow = param.Boolean(default=True)
54
55 def create_bokeh_theme(self):
56 """Returns a custom bokeh theme based on the style parameters
57
58 Returns:
59 Dict: A Bokeh Theme
60 """
61
62 return {
63 "attrs": {
64 "Figure": {
65 "background_fill_color": self.background_color,
66 "border_fill_color": self.neutral_fill_card_rest,
67 "border_fill_alpha": 0,
68 "outline_line_color": self.neutral_focus,
69 "outline_line_alpha": 0.5,
70 "outline_line_width": 1,
71 },
72 "Grid": {"grid_line_color": self.neutral_focus, "grid_line_alpha": 0.25},
73 "Axis": {
74 "major_tick_line_alpha": 0.5,
75 "major_tick_line_color": self.neutral_foreground_rest,
76 "minor_tick_line_alpha": 0.25,
77 "minor_tick_line_color": self.neutral_foreground_rest,
78 "axis_line_alpha": 0.1,
79 "axis_line_color": self.neutral_foreground_rest,
80 "major_label_text_color": self.neutral_foreground_rest,
81 "major_label_text_font": self.font,
82 # Should be added back when bokeh 2.3.3 is released and https://github.com/bokeh/bokeh/issues/11110 fixed
83 # "major_label_text_font_size": "1.025em",
84 "axis_label_standoff": 10,
85 "axis_label_text_color": self.neutral_foreground_rest,
86 "axis_label_text_font": self.font,
87 "axis_label_text_font_size": "1.25em",
88 "axis_label_text_font_style": "normal",
89 },
90 "Legend": {
91 "spacing": 8,
92 "glyph_width": 15,
93 "label_standoff": 8,
94 "label_text_color": self.neutral_foreground_rest,
95 "label_text_font": self.font,
96 "label_text_font_size": "1.025em",
97 "border_line_alpha": 0.5,
98 "border_line_color": self.neutral_focus,
99 "background_fill_alpha": 0.25,
100 "background_fill_color": self.neutral_fill_card_rest,
101 },
102 "ColorBar": {
103 "title_text_color": self.neutral_foreground_rest,
104 "title_text_font": self.font,
105 "title_text_font_size": "1.025em",
106 "title_text_font_style": "normal",
107 "major_label_text_color": self.neutral_foreground_rest,
108 "major_label_text_font": self.font,
109 "major_label_text_font_size": "1.025em",
110 # "background_fill_color": FAST_DARK_75,
111 "major_tick_line_alpha": 0,
112 "bar_line_alpha": 0,
113 },
114 "Title": {
115 "text_color": self.neutral_foreground_rest,
116 "text_font": self.font,
117 "text_font_size": "1.15em",
118 },
119 }
120 }
121
122
123 DEFAULT_STYLE = FastStyle()
124 DARK_STYLE = FastStyle(
125 accent_base_color="#E1477E",
126 background_color="#181818",
127 color="#ffffff",
128 header_background="#313131",
129 header_color="#ffffff",
130 neutral_fill_card_rest="#212121",
131 neutral_focus="#717171",
132 neutral_foreground_rest="#e5e5e5",
133 shadow = False,
134 )
135
136 class FastDefaultTheme(DefaultTheme):
137
138 base_css = param.Filename(default=_ROOT / 'fast_root_default.css')
139
140 style = param.ClassSelector(default=DEFAULT_STYLE, class_=FastStyle)
141
142 __abstract = True
143
144 @property
145 def bokeh_theme(self):
146 return _BkTheme(json=self.style.create_bokeh_theme())
147
148
149 class FastDarkTheme(DarkTheme):
150
151 base_css = param.Filename(default=_ROOT / 'fast_root_dark.css')
152
153 style = param.ClassSelector(default=DARK_STYLE, class_=FastStyle)
154
155 __abstract = True
156
157 @property
158 def bokeh_theme(self):
159 return _BkTheme(json=self.style.create_bokeh_theme())
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/template/fast/theme.py b/panel/template/fast/theme.py
--- a/panel/template/fast/theme.py
+++ b/panel/template/fast/theme.py
@@ -100,6 +100,7 @@
"background_fill_color": self.neutral_fill_card_rest,
},
"ColorBar": {
+ "background_fill_color": self.background_color,
"title_text_color": self.neutral_foreground_rest,
"title_text_font": self.font,
"title_text_font_size": "1.025em",
@@ -107,7 +108,6 @@
"major_label_text_color": self.neutral_foreground_rest,
"major_label_text_font": self.font,
"major_label_text_font_size": "1.025em",
- # "background_fill_color": FAST_DARK_75,
"major_tick_line_alpha": 0,
"bar_line_alpha": 0,
},
| {"golden_diff": "diff --git a/panel/template/fast/theme.py b/panel/template/fast/theme.py\n--- a/panel/template/fast/theme.py\n+++ b/panel/template/fast/theme.py\n@@ -100,6 +100,7 @@\n \"background_fill_color\": self.neutral_fill_card_rest,\n },\n \"ColorBar\": {\n+ \"background_fill_color\": self.background_color,\n \"title_text_color\": self.neutral_foreground_rest,\n \"title_text_font\": self.font,\n \"title_text_font_size\": \"1.025em\",\n@@ -107,7 +108,6 @@\n \"major_label_text_color\": self.neutral_foreground_rest,\n \"major_label_text_font\": self.font,\n \"major_label_text_font_size\": \"1.025em\",\n- # \"background_fill_color\": FAST_DARK_75,\n \"major_tick_line_alpha\": 0,\n \"bar_line_alpha\": 0,\n },\n", "issue": "HoloViews Colorbar not styled for dark themed template\nPanel: 0.12.4\r\n\r\nThe ColorBar is unreadable when using the `FastListTemplate` with the `dark` theme.\r\n\r\nI would like this to work for an example I am contributing to the `scikit-image` docs.\r\n\r\n\r\n\r\n```python\r\nimport holoviews as hv\r\nimport panel as pn\r\nfrom skimage import data, filters\r\n\r\nimage = data.coins()\r\nedges = filters.sobel(image)*256\r\nbounds = (-1, -1, 1, 1)\r\n\r\nafter_img = hv.Image(edges, bounds=bounds).apply.opts(\r\n cmap=\"binary_r\", responsive=True, title=\"After\", active_tools=[\"box_zoom\"], tools=[\"hover\"], colorbar=True\r\n)\r\n\r\nlayout=pn.panel(after_img, height=800, aspect_ratio=1)\r\n\r\npn.template.FastListTemplate(\r\n site=\"Panel and Scikit-Image\",\r\n main=[layout],\r\n header_background=\"#292929\",\r\n theme=\"dark\",\r\n).servable()\r\n```\r\n\r\n## How do I fix this?\r\n\r\nI would like to contribute a PR. I assume I would have to update the Bokeh Theme generation here https://github.com/holoviz/panel/blob/396ce2f37473a047e4721a1431e6795374686533/panel/template/fast/theme.py#L62.\r\n\r\nI can now see that I need to change to\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nFunctionality for styling according to Fast.design\n\"\"\"\nimport pathlib\nimport param\n\nfrom bokeh.themes import Theme as _BkTheme\n\nfrom ..theme import DarkTheme, DefaultTheme\n\n_ROOT = pathlib.Path(__file__).parent / \"css\"\n\nCOLLAPSED_SVG_ICON = \"\"\"\n<svg style=\"stroke: var(--accent-fill-rest);\" width=\"18\" height=\"18\" viewBox=\"0 0 18 18\" fill=\"none\" xmlns=\"http://www.w3.org/2000/svg\" slot=\"collapsed-icon\">\n <path d=\"M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M9 5.44446V12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M5.44446 9H12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n</svg>\n\"\"\" # noqa\n\nEXPANDED_SVG_ICON = \"\"\"\n<svg style=\"stroke: var(--accent-fill-rest);\" width=\"18\" height=\"18\" viewBox=\"0 0 18 18\" fill=\"none\" xmlns=\"http://www.w3.org/2000/svg\" slot=\"expanded-icon\">\n <path d=\"M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M5.44446 9H12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n</svg>\n\"\"\" # noqa\n\nFONT_URL = \"//fonts.googleapis.com/css?family=Open+Sans\"\n\nclass FastStyle(param.Parameterized):\n \"\"\"\n The FastStyle class 
provides the different colors and icons used\n to style the Fast Templates.\n \"\"\"\n\n background_color = param.String(default=\"#ffffff\")\n neutral_color = param.String(default=\"#000000\")\n accent_base_color = param.String(default=\"#A01346\")\n collapsed_icon = param.String(default=COLLAPSED_SVG_ICON)\n expanded_icon = param.String(default=EXPANDED_SVG_ICON)\n color = param.String(default=\"#00aa41\")\n neutral_fill_card_rest = param.String(default=\"#F7F7F7\")\n neutral_focus = param.String(default=\"#888888\")\n neutral_foreground_rest = param.String(default=\"#2B2B2B\")\n\n header_background = param.String(default=\"#00aa41\")\n header_neutral_color = param.String(default=\"#ffffff\")\n header_accent_base_color = param.String(default=\"#ffffff\")\n header_color = param.String(default=\"#ffffff\")\n font = param.String(default=\"Open Sans, sans-serif\")\n font_url = param.String(default=FONT_URL)\n corner_radius = param.Integer(default=3)\n shadow = param.Boolean(default=True)\n\n def create_bokeh_theme(self):\n \"\"\"Returns a custom bokeh theme based on the style parameters\n\n Returns:\n Dict: A Bokeh Theme\n \"\"\"\n\n return {\n \"attrs\": {\n \"Figure\": {\n \"background_fill_color\": self.background_color,\n \"border_fill_color\": self.neutral_fill_card_rest,\n \"border_fill_alpha\": 0,\n \"outline_line_color\": self.neutral_focus,\n \"outline_line_alpha\": 0.5,\n \"outline_line_width\": 1,\n },\n \"Grid\": {\"grid_line_color\": self.neutral_focus, \"grid_line_alpha\": 0.25},\n \"Axis\": {\n \"major_tick_line_alpha\": 0.5,\n \"major_tick_line_color\": self.neutral_foreground_rest,\n \"minor_tick_line_alpha\": 0.25,\n \"minor_tick_line_color\": self.neutral_foreground_rest,\n \"axis_line_alpha\": 0.1,\n \"axis_line_color\": self.neutral_foreground_rest,\n \"major_label_text_color\": self.neutral_foreground_rest,\n \"major_label_text_font\": self.font,\n # Should be added back when bokeh 2.3.3 is released and https://github.com/bokeh/bokeh/issues/11110 fixed\n # \"major_label_text_font_size\": \"1.025em\",\n \"axis_label_standoff\": 10,\n \"axis_label_text_color\": self.neutral_foreground_rest,\n \"axis_label_text_font\": self.font,\n \"axis_label_text_font_size\": \"1.25em\",\n \"axis_label_text_font_style\": \"normal\",\n },\n \"Legend\": {\n \"spacing\": 8,\n \"glyph_width\": 15,\n \"label_standoff\": 8,\n \"label_text_color\": self.neutral_foreground_rest,\n \"label_text_font\": self.font,\n \"label_text_font_size\": \"1.025em\",\n \"border_line_alpha\": 0.5,\n \"border_line_color\": self.neutral_focus,\n \"background_fill_alpha\": 0.25,\n \"background_fill_color\": self.neutral_fill_card_rest,\n },\n \"ColorBar\": {\n \"title_text_color\": self.neutral_foreground_rest,\n \"title_text_font\": self.font,\n \"title_text_font_size\": \"1.025em\",\n \"title_text_font_style\": \"normal\",\n \"major_label_text_color\": self.neutral_foreground_rest,\n \"major_label_text_font\": self.font,\n \"major_label_text_font_size\": \"1.025em\",\n # \"background_fill_color\": FAST_DARK_75,\n \"major_tick_line_alpha\": 0,\n \"bar_line_alpha\": 0,\n },\n \"Title\": {\n \"text_color\": self.neutral_foreground_rest,\n \"text_font\": self.font,\n \"text_font_size\": \"1.15em\",\n },\n }\n }\n\n\nDEFAULT_STYLE = FastStyle()\nDARK_STYLE = FastStyle(\n accent_base_color=\"#E1477E\",\n background_color=\"#181818\",\n color=\"#ffffff\",\n header_background=\"#313131\",\n header_color=\"#ffffff\",\n neutral_fill_card_rest=\"#212121\",\n neutral_focus=\"#717171\",\n 
neutral_foreground_rest=\"#e5e5e5\",\n shadow = False,\n)\n\nclass FastDefaultTheme(DefaultTheme):\n\n base_css = param.Filename(default=_ROOT / 'fast_root_default.css')\n\n style = param.ClassSelector(default=DEFAULT_STYLE, class_=FastStyle)\n\n __abstract = True\n\n @property\n def bokeh_theme(self):\n return _BkTheme(json=self.style.create_bokeh_theme())\n\n\nclass FastDarkTheme(DarkTheme):\n\n base_css = param.Filename(default=_ROOT / 'fast_root_dark.css')\n\n style = param.ClassSelector(default=DARK_STYLE, class_=FastStyle)\n\n __abstract = True\n\n @property\n def bokeh_theme(self):\n return _BkTheme(json=self.style.create_bokeh_theme())\n", "path": "panel/template/fast/theme.py"}], "after_files": [{"content": "\"\"\"\nFunctionality for styling according to Fast.design\n\"\"\"\nimport pathlib\nimport param\n\nfrom bokeh.themes import Theme as _BkTheme\n\nfrom ..theme import DarkTheme, DefaultTheme\n\n_ROOT = pathlib.Path(__file__).parent / \"css\"\n\nCOLLAPSED_SVG_ICON = \"\"\"\n<svg style=\"stroke: var(--accent-fill-rest);\" width=\"18\" height=\"18\" viewBox=\"0 0 18 18\" fill=\"none\" xmlns=\"http://www.w3.org/2000/svg\" slot=\"collapsed-icon\">\n <path d=\"M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M9 5.44446V12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M5.44446 9H12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n</svg>\n\"\"\" # noqa\n\nEXPANDED_SVG_ICON = \"\"\"\n<svg style=\"stroke: var(--accent-fill-rest);\" width=\"18\" height=\"18\" viewBox=\"0 0 18 18\" fill=\"none\" xmlns=\"http://www.w3.org/2000/svg\" slot=\"expanded-icon\">\n <path d=\"M15.2222 1H2.77778C1.79594 1 1 1.79594 1 2.77778V15.2222C1 16.2041 1.79594 17 2.77778 17H15.2222C16.2041 17 17 16.2041 17 15.2222V2.77778C17 1.79594 16.2041 1 15.2222 1Z\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n <path d=\"M5.44446 9H12.5556\" stroke-linecap=\"round\" stroke-linejoin=\"round\"></path>\n</svg>\n\"\"\" # noqa\n\nFONT_URL = \"//fonts.googleapis.com/css?family=Open+Sans\"\n\nclass FastStyle(param.Parameterized):\n \"\"\"\n The FastStyle class provides the different colors and icons used\n to style the Fast Templates.\n \"\"\"\n\n background_color = param.String(default=\"#ffffff\")\n neutral_color = param.String(default=\"#000000\")\n accent_base_color = param.String(default=\"#A01346\")\n collapsed_icon = param.String(default=COLLAPSED_SVG_ICON)\n expanded_icon = param.String(default=EXPANDED_SVG_ICON)\n color = param.String(default=\"#00aa41\")\n neutral_fill_card_rest = param.String(default=\"#F7F7F7\")\n neutral_focus = param.String(default=\"#888888\")\n neutral_foreground_rest = param.String(default=\"#2B2B2B\")\n\n header_background = param.String(default=\"#00aa41\")\n header_neutral_color = param.String(default=\"#ffffff\")\n header_accent_base_color = param.String(default=\"#ffffff\")\n header_color = param.String(default=\"#ffffff\")\n font = param.String(default=\"Open Sans, sans-serif\")\n font_url = param.String(default=FONT_URL)\n corner_radius = param.Integer(default=3)\n shadow = param.Boolean(default=True)\n\n def create_bokeh_theme(self):\n \"\"\"Returns a custom bokeh theme based on the style parameters\n\n Returns:\n Dict: A Bokeh Theme\n \"\"\"\n\n return {\n \"attrs\": {\n \"Figure\": {\n \"background_fill_color\": 
self.background_color,\n \"border_fill_color\": self.neutral_fill_card_rest,\n \"border_fill_alpha\": 0,\n \"outline_line_color\": self.neutral_focus,\n \"outline_line_alpha\": 0.5,\n \"outline_line_width\": 1,\n },\n \"Grid\": {\"grid_line_color\": self.neutral_focus, \"grid_line_alpha\": 0.25},\n \"Axis\": {\n \"major_tick_line_alpha\": 0.5,\n \"major_tick_line_color\": self.neutral_foreground_rest,\n \"minor_tick_line_alpha\": 0.25,\n \"minor_tick_line_color\": self.neutral_foreground_rest,\n \"axis_line_alpha\": 0.1,\n \"axis_line_color\": self.neutral_foreground_rest,\n \"major_label_text_color\": self.neutral_foreground_rest,\n \"major_label_text_font\": self.font,\n # Should be added back when bokeh 2.3.3 is released and https://github.com/bokeh/bokeh/issues/11110 fixed\n # \"major_label_text_font_size\": \"1.025em\",\n \"axis_label_standoff\": 10,\n \"axis_label_text_color\": self.neutral_foreground_rest,\n \"axis_label_text_font\": self.font,\n \"axis_label_text_font_size\": \"1.25em\",\n \"axis_label_text_font_style\": \"normal\",\n },\n \"Legend\": {\n \"spacing\": 8,\n \"glyph_width\": 15,\n \"label_standoff\": 8,\n \"label_text_color\": self.neutral_foreground_rest,\n \"label_text_font\": self.font,\n \"label_text_font_size\": \"1.025em\",\n \"border_line_alpha\": 0.5,\n \"border_line_color\": self.neutral_focus,\n \"background_fill_alpha\": 0.25,\n \"background_fill_color\": self.neutral_fill_card_rest,\n },\n \"ColorBar\": {\n \"background_fill_color\": self.background_color,\n \"title_text_color\": self.neutral_foreground_rest,\n \"title_text_font\": self.font,\n \"title_text_font_size\": \"1.025em\",\n \"title_text_font_style\": \"normal\",\n \"major_label_text_color\": self.neutral_foreground_rest,\n \"major_label_text_font\": self.font,\n \"major_label_text_font_size\": \"1.025em\",\n \"major_tick_line_alpha\": 0,\n \"bar_line_alpha\": 0,\n },\n \"Title\": {\n \"text_color\": self.neutral_foreground_rest,\n \"text_font\": self.font,\n \"text_font_size\": \"1.15em\",\n },\n }\n }\n\n\nDEFAULT_STYLE = FastStyle()\nDARK_STYLE = FastStyle(\n accent_base_color=\"#E1477E\",\n background_color=\"#181818\",\n color=\"#ffffff\",\n header_background=\"#313131\",\n header_color=\"#ffffff\",\n neutral_fill_card_rest=\"#212121\",\n neutral_focus=\"#717171\",\n neutral_foreground_rest=\"#e5e5e5\",\n shadow = False,\n)\n\nclass FastDefaultTheme(DefaultTheme):\n\n base_css = param.Filename(default=_ROOT / 'fast_root_default.css')\n\n style = param.ClassSelector(default=DEFAULT_STYLE, class_=FastStyle)\n\n __abstract = True\n\n @property\n def bokeh_theme(self):\n return _BkTheme(json=self.style.create_bokeh_theme())\n\n\nclass FastDarkTheme(DarkTheme):\n\n base_css = param.Filename(default=_ROOT / 'fast_root_dark.css')\n\n style = param.ClassSelector(default=DARK_STYLE, class_=FastStyle)\n\n __abstract = True\n\n @property\n def bokeh_theme(self):\n return _BkTheme(json=self.style.create_bokeh_theme())\n", "path": "panel/template/fast/theme.py"}]} | 3,024 | 209 |
gh_patches_debug_7402 | rasdani/github-patches | git_diff | getsentry__sentry-66877 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Incorrect data in events when adding large integers to context data
Sentry is treating big values differently in tags and context causing some values to be in correct after event processing. Below is an example event I sent using the ruby SDK, the same behaviour happens in Python as well:
Script:
```
::Sentry.init do |config|
config.dsn = "_MY_DSN_"
end
Sentry.set_tags('bigNumber': 608548899684111178)
Sentry.configure_scope do |scope|
scope.set_context(
'arguments',
{
name: 'A big value',
age: 608548899684111178,
}
)
end
Sentry.capture_message("le big number test")
```
I expect to see both tag and context showing the same value, but in the context session it is showing as `608548899684111200` This is a common issue with large integer/floating point in Node.
A possible workaround is to add the value as a string when it is added manually, but it is not applicable when the data is automatically added by the SDK.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/sentry/interfaces/contexts.py`
Content:
```
1 from __future__ import annotations
2
3 import string
4 from typing import ClassVar, TypeVar
5
6 from django.utils.encoding import force_str
7
8 from sentry.interfaces.base import Interface
9 from sentry.utils.json import prune_empty_keys
10 from sentry.utils.safe import get_path
11
12 __all__ = ("Contexts",)
13
14 ContextTypeT = TypeVar("ContextTypeT", bound="ContextType")
15
16 context_types: dict[str, type[ContextType]] = {}
17
18
19 class _IndexFormatter(string.Formatter):
20 def format_field(self, value, format_spec):
21 if not format_spec and isinstance(value, bool):
22 return value and "yes" or "no"
23 return string.Formatter.format_field(self, value, format_spec)
24
25
26 def format_index_expr(format_string, data):
27 return str(_IndexFormatter().vformat(str(format_string), (), data).strip())
28
29
30 def contexttype(cls: type[ContextTypeT]) -> type[ContextTypeT]:
31 context_types[cls.type] = cls
32 return cls
33
34
35 # NOTE: Are you adding a new context? Make sure to also update the
36 # documentation in the sentry develop docs [0]!
37 #
38 # [0]: https://develop.sentry.dev/sdk/event-payloads/contexts
39
40
41 class ContextType:
42 context_to_tag_mapping: ClassVar[dict[str, str]] = {}
43 """
44 This indicates which fields should be promoted into tags during event
45 normalization. (See EventManager)
46
47 The key for each entry is used as the name of the tag suffixed by the
48 "alias" of the context (this is the key of the context in the contexts
49 object, it is NOT the `type` of the context, though they are often the
50 same).
51
52 The value is a format string spec that uses python string.Formatter to
53 interpolate any value from the context object.
54
55 There is one special case:
56
57 - When the key of the mapping is an empty string the tag name will simply be
58 the alias.
59
60 For example if you have a context named "myContext" with the data:
61
62 ```json
63 "myContext": {
64 "some_value": "hello world",
65 "subkey": "whatever",
66 "type": "myContext"
67 }
68 ```
69
70 and you have a context_to_tag_mapping that looks like
71
72 ```python
73 context_to_tag_mapping = {"": "{some_value}", "subkey": "{subkey}"}
74 ```
75
76 Then normalization will result in two tags being promoted:
77
78 - myContext: "hello world"
79 - myContext.subkey: "whatever"
80 """
81
82 type: str
83 """This should match the `type` key in context object"""
84
85 def __init__(self, alias, data):
86 self.alias = alias
87 ctx_data = {}
88 for key, value in data.items():
89 # we use a simple check here, rather than ' in set()' to avoid
90 # issues with maps/lists.
91
92 # Even if the value is an empty string,
93 # we still want to display the info the UI
94 if value is not None:
95 ctx_data[force_str(key)] = value
96 self.data = ctx_data
97
98 def to_json(self):
99 rv = dict(self.data)
100 rv["type"] = self.type
101 return prune_empty_keys(rv)
102
103 @classmethod
104 def values_for_data(cls, data):
105 rv = []
106 for context in (data.get("contexts") or {}).values():
107 if context and context.get("type") == cls.type:
108 rv.append(context)
109 return rv
110
111 @classmethod
112 def primary_value_for_data(cls, data):
113 val = get_path(data, "contexts", cls.type)
114 if val and val.get("type") == cls.type:
115 return val
116
117 rv = cls.values_for_data(data)
118 if len(rv) == 1:
119 return rv[0]
120
121 def iter_tags(self):
122 if self.context_to_tag_mapping:
123 for field, f_string in self.context_to_tag_mapping.items():
124 try:
125 value = format_index_expr(f_string, self.data)
126 except KeyError:
127 continue
128 if value:
129 if not field:
130 yield (self.alias, value)
131 else:
132 yield (f"{self.alias}.{field}", value)
133
134
135 # TODO(dcramer): contexts need to document/describe expected (optional) fields
136 @contexttype
137 class DefaultContextType(ContextType):
138 type = "default"
139
140
141 @contexttype
142 class AppContextType(ContextType):
143 type = "app"
144 context_to_tag_mapping = {"device": "{device_app_hash}"}
145
146
147 @contexttype
148 class DeviceContextType(ContextType):
149 type = "device"
150 context_to_tag_mapping = {"": "{model}", "family": "{family}"}
151 # model_id, arch
152
153
154 @contexttype
155 class RuntimeContextType(ContextType):
156 type = "runtime"
157 context_to_tag_mapping = {"": "{name} {version}", "name": "{name}"}
158
159
160 @contexttype
161 class BrowserContextType(ContextType):
162 type = "browser"
163 context_to_tag_mapping = {"": "{name} {version}", "name": "{name}"}
164 # viewport
165
166
167 @contexttype
168 class OsContextType(ContextType):
169 type = "os"
170 context_to_tag_mapping = {"": "{name} {version}", "name": "{name}", "rooted": "{rooted}"}
171 # build, rooted
172
173
174 @contexttype
175 class GpuContextType(ContextType):
176 type = "gpu"
177 context_to_tag_mapping = {"name": "{name}", "vendor": "{vendor_name}"}
178
179
180 @contexttype
181 class MonitorContextType(ContextType):
182 type = "monitor"
183 context_to_tag_mapping = {"id": "{id}", "slug": "{slug}"}
184
185
186 @contexttype
187 class TraceContextType(ContextType):
188 type = "trace"
189 context_to_tag_mapping = {}
190
191
192 @contexttype
193 class OtelContextType(ContextType):
194 type = "otel"
195 context_to_tag_mapping = {}
196
197
198 class Contexts(Interface):
199 """
200 This interface stores context specific information.
201 """
202
203 display_score = 1100
204 score = 800
205
206 @classmethod
207 def to_python(cls, data, **kwargs):
208 rv = {}
209
210 # Note the alias is the key of the context entry
211 for alias, value in data.items():
212 # XXX(markus): The `None`-case should be handled in the UI and
213 # other consumers of this interface
214 if value is not None:
215 rv[alias] = cls.normalize_context(alias, value)
216
217 return super().to_python(rv, **kwargs)
218
219 @classmethod
220 def normalize_context(cls, alias, data):
221 ctx_type = data.get("type", alias)
222 ctx_cls = context_types.get(ctx_type, DefaultContextType)
223 return ctx_cls(alias, data)
224
225 def iter_contexts(self):
226 return self._data.values()
227
228 def to_json(self):
229 rv = {}
230 for alias, inst in self._data.items():
231 rv[alias] = inst.to_json()
232 return rv
233
234 def iter_tags(self):
235 for inst in self.iter_contexts():
236 yield from inst.iter_tags()
237
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/sentry/interfaces/contexts.py b/src/sentry/interfaces/contexts.py
--- a/src/sentry/interfaces/contexts.py
+++ b/src/sentry/interfaces/contexts.py
@@ -93,6 +93,9 @@
# we still want to display the info the UI
if value is not None:
ctx_data[force_str(key)] = value
+ # Numbers exceeding 15 place values will be converted to strings to avoid rendering issues
+ if isinstance(value, (int, float)) and len(str_value := force_str(value)) > 15:
+ ctx_data[force_str(key)] = str_value
self.data = ctx_data
def to_json(self):
| {"golden_diff": "diff --git a/src/sentry/interfaces/contexts.py b/src/sentry/interfaces/contexts.py\n--- a/src/sentry/interfaces/contexts.py\n+++ b/src/sentry/interfaces/contexts.py\n@@ -93,6 +93,9 @@\n # we still want to display the info the UI\n if value is not None:\n ctx_data[force_str(key)] = value\n+ # Numbers exceeding 15 place values will be converted to strings to avoid rendering issues\n+ if isinstance(value, (int, float)) and len(str_value := force_str(value)) > 15:\n+ ctx_data[force_str(key)] = str_value\n self.data = ctx_data\n \n def to_json(self):\n", "issue": "Incorrect data in events when adding large integers to context data \nSentry is treating big values differently in tags and context causing some values to be in correct after event processing. Below is an example event I sent using the ruby SDK, the same behaviour happens in Python as well: \r\n\r\nScript:\r\n```\r\n::Sentry.init do |config|\r\n config.dsn = \"_MY_DSN_\"\r\nend\r\n\r\nSentry.set_tags('bigNumber': 608548899684111178)\r\nSentry.configure_scope do |scope|\r\n scope.set_context(\r\n 'arguments',\r\n {\r\n name: 'A big value',\r\n age: 608548899684111178,\r\n }\r\n )\r\nend\r\n\r\nSentry.capture_message(\"le big number test\")\r\n```\r\n\r\nI expect to see both tag and context showing the same value, but in the context session it is showing as `608548899684111200` This is a common issue with large integer/floating point in Node. \r\n\r\nA possible workaround is to add the value as a string when it is added manually, but it is not applicable when the data is automatically added by the SDK.\r\n\r\n\r\n\n", "before_files": [{"content": "from __future__ import annotations\n\nimport string\nfrom typing import ClassVar, TypeVar\n\nfrom django.utils.encoding import force_str\n\nfrom sentry.interfaces.base import Interface\nfrom sentry.utils.json import prune_empty_keys\nfrom sentry.utils.safe import get_path\n\n__all__ = (\"Contexts\",)\n\nContextTypeT = TypeVar(\"ContextTypeT\", bound=\"ContextType\")\n\ncontext_types: dict[str, type[ContextType]] = {}\n\n\nclass _IndexFormatter(string.Formatter):\n def format_field(self, value, format_spec):\n if not format_spec and isinstance(value, bool):\n return value and \"yes\" or \"no\"\n return string.Formatter.format_field(self, value, format_spec)\n\n\ndef format_index_expr(format_string, data):\n return str(_IndexFormatter().vformat(str(format_string), (), data).strip())\n\n\ndef contexttype(cls: type[ContextTypeT]) -> type[ContextTypeT]:\n context_types[cls.type] = cls\n return cls\n\n\n# NOTE: Are you adding a new context? Make sure to also update the\n# documentation in the sentry develop docs [0]!\n#\n# [0]: https://develop.sentry.dev/sdk/event-payloads/contexts\n\n\nclass ContextType:\n context_to_tag_mapping: ClassVar[dict[str, str]] = {}\n \"\"\"\n This indicates which fields should be promoted into tags during event\n normalization. 
(See EventManager)\n\n The key for each entry is used as the name of the tag suffixed by the\n \"alias\" of the context (this is the key of the context in the contexts\n object, it is NOT the `type` of the context, though they are often the\n same).\n\n The value is a format string spec that uses python string.Formatter to\n interpolate any value from the context object.\n\n There is one special case:\n\n - When the key of the mapping is an empty string the tag name will simply be\n the alias.\n\n For example if you have a context named \"myContext\" with the data:\n\n ```json\n \"myContext\": {\n \"some_value\": \"hello world\",\n \"subkey\": \"whatever\",\n \"type\": \"myContext\"\n }\n ```\n\n and you have a context_to_tag_mapping that looks like\n\n ```python\n context_to_tag_mapping = {\"\": \"{some_value}\", \"subkey\": \"{subkey}\"}\n ```\n\n Then normalization will result in two tags being promoted:\n\n - myContext: \"hello world\"\n - myContext.subkey: \"whatever\"\n \"\"\"\n\n type: str\n \"\"\"This should match the `type` key in context object\"\"\"\n\n def __init__(self, alias, data):\n self.alias = alias\n ctx_data = {}\n for key, value in data.items():\n # we use a simple check here, rather than ' in set()' to avoid\n # issues with maps/lists.\n\n # Even if the value is an empty string,\n # we still want to display the info the UI\n if value is not None:\n ctx_data[force_str(key)] = value\n self.data = ctx_data\n\n def to_json(self):\n rv = dict(self.data)\n rv[\"type\"] = self.type\n return prune_empty_keys(rv)\n\n @classmethod\n def values_for_data(cls, data):\n rv = []\n for context in (data.get(\"contexts\") or {}).values():\n if context and context.get(\"type\") == cls.type:\n rv.append(context)\n return rv\n\n @classmethod\n def primary_value_for_data(cls, data):\n val = get_path(data, \"contexts\", cls.type)\n if val and val.get(\"type\") == cls.type:\n return val\n\n rv = cls.values_for_data(data)\n if len(rv) == 1:\n return rv[0]\n\n def iter_tags(self):\n if self.context_to_tag_mapping:\n for field, f_string in self.context_to_tag_mapping.items():\n try:\n value = format_index_expr(f_string, self.data)\n except KeyError:\n continue\n if value:\n if not field:\n yield (self.alias, value)\n else:\n yield (f\"{self.alias}.{field}\", value)\n\n\n# TODO(dcramer): contexts need to document/describe expected (optional) fields\n@contexttype\nclass DefaultContextType(ContextType):\n type = \"default\"\n\n\n@contexttype\nclass AppContextType(ContextType):\n type = \"app\"\n context_to_tag_mapping = {\"device\": \"{device_app_hash}\"}\n\n\n@contexttype\nclass DeviceContextType(ContextType):\n type = \"device\"\n context_to_tag_mapping = {\"\": \"{model}\", \"family\": \"{family}\"}\n # model_id, arch\n\n\n@contexttype\nclass RuntimeContextType(ContextType):\n type = \"runtime\"\n context_to_tag_mapping = {\"\": \"{name} {version}\", \"name\": \"{name}\"}\n\n\n@contexttype\nclass BrowserContextType(ContextType):\n type = \"browser\"\n context_to_tag_mapping = {\"\": \"{name} {version}\", \"name\": \"{name}\"}\n # viewport\n\n\n@contexttype\nclass OsContextType(ContextType):\n type = \"os\"\n context_to_tag_mapping = {\"\": \"{name} {version}\", \"name\": \"{name}\", \"rooted\": \"{rooted}\"}\n # build, rooted\n\n\n@contexttype\nclass GpuContextType(ContextType):\n type = \"gpu\"\n context_to_tag_mapping = {\"name\": \"{name}\", \"vendor\": \"{vendor_name}\"}\n\n\n@contexttype\nclass MonitorContextType(ContextType):\n type = \"monitor\"\n context_to_tag_mapping = {\"id\": 
\"{id}\", \"slug\": \"{slug}\"}\n\n\n@contexttype\nclass TraceContextType(ContextType):\n type = \"trace\"\n context_to_tag_mapping = {}\n\n\n@contexttype\nclass OtelContextType(ContextType):\n type = \"otel\"\n context_to_tag_mapping = {}\n\n\nclass Contexts(Interface):\n \"\"\"\n This interface stores context specific information.\n \"\"\"\n\n display_score = 1100\n score = 800\n\n @classmethod\n def to_python(cls, data, **kwargs):\n rv = {}\n\n # Note the alias is the key of the context entry\n for alias, value in data.items():\n # XXX(markus): The `None`-case should be handled in the UI and\n # other consumers of this interface\n if value is not None:\n rv[alias] = cls.normalize_context(alias, value)\n\n return super().to_python(rv, **kwargs)\n\n @classmethod\n def normalize_context(cls, alias, data):\n ctx_type = data.get(\"type\", alias)\n ctx_cls = context_types.get(ctx_type, DefaultContextType)\n return ctx_cls(alias, data)\n\n def iter_contexts(self):\n return self._data.values()\n\n def to_json(self):\n rv = {}\n for alias, inst in self._data.items():\n rv[alias] = inst.to_json()\n return rv\n\n def iter_tags(self):\n for inst in self.iter_contexts():\n yield from inst.iter_tags()\n", "path": "src/sentry/interfaces/contexts.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport string\nfrom typing import ClassVar, TypeVar\n\nfrom django.utils.encoding import force_str\n\nfrom sentry.interfaces.base import Interface\nfrom sentry.utils.json import prune_empty_keys\nfrom sentry.utils.safe import get_path\n\n__all__ = (\"Contexts\",)\n\nContextTypeT = TypeVar(\"ContextTypeT\", bound=\"ContextType\")\n\ncontext_types: dict[str, type[ContextType]] = {}\n\n\nclass _IndexFormatter(string.Formatter):\n def format_field(self, value, format_spec):\n if not format_spec and isinstance(value, bool):\n return value and \"yes\" or \"no\"\n return string.Formatter.format_field(self, value, format_spec)\n\n\ndef format_index_expr(format_string, data):\n return str(_IndexFormatter().vformat(str(format_string), (), data).strip())\n\n\ndef contexttype(cls: type[ContextTypeT]) -> type[ContextTypeT]:\n context_types[cls.type] = cls\n return cls\n\n\n# NOTE: Are you adding a new context? Make sure to also update the\n# documentation in the sentry develop docs [0]!\n#\n# [0]: https://develop.sentry.dev/sdk/event-payloads/contexts\n\n\nclass ContextType:\n context_to_tag_mapping: ClassVar[dict[str, str]] = {}\n \"\"\"\n This indicates which fields should be promoted into tags during event\n normalization. 
(See EventManager)\n\n The key for each entry is used as the name of the tag suffixed by the\n \"alias\" of the context (this is the key of the context in the contexts\n object, it is NOT the `type` of the context, though they are often the\n same).\n\n The value is a format string spec that uses python string.Formatter to\n interpolate any value from the context object.\n\n There is one special case:\n\n - When the key of the mapping is an empty string the tag name will simply be\n the alias.\n\n For example if you have a context named \"myContext\" with the data:\n\n ```json\n \"myContext\": {\n \"some_value\": \"hello world\",\n \"subkey\": \"whatever\",\n \"type\": \"myContext\"\n }\n ```\n\n and you have a context_to_tag_mapping that looks like\n\n ```python\n context_to_tag_mapping = {\"\": \"{some_value}\", \"subkey\": \"{subkey}\"}\n ```\n\n Then normalization will result in two tags being promoted:\n\n - myContext: \"hello world\"\n - myContext.subkey: \"whatever\"\n \"\"\"\n\n type: str\n \"\"\"This should match the `type` key in context object\"\"\"\n\n def __init__(self, alias, data):\n self.alias = alias\n ctx_data = {}\n for key, value in data.items():\n # we use a simple check here, rather than ' in set()' to avoid\n # issues with maps/lists.\n\n # Even if the value is an empty string,\n # we still want to display the info the UI\n if value is not None:\n ctx_data[force_str(key)] = value\n # Numbers exceeding 15 place values will be converted to strings to avoid rendering issues\n if isinstance(value, (int, float)) and len(str_value := force_str(value)) > 15:\n ctx_data[force_str(key)] = str_value\n self.data = ctx_data\n\n def to_json(self):\n rv = dict(self.data)\n rv[\"type\"] = self.type\n return prune_empty_keys(rv)\n\n @classmethod\n def values_for_data(cls, data):\n rv = []\n for context in (data.get(\"contexts\") or {}).values():\n if context and context.get(\"type\") == cls.type:\n rv.append(context)\n return rv\n\n @classmethod\n def primary_value_for_data(cls, data):\n val = get_path(data, \"contexts\", cls.type)\n if val and val.get(\"type\") == cls.type:\n return val\n\n rv = cls.values_for_data(data)\n if len(rv) == 1:\n return rv[0]\n\n def iter_tags(self):\n if self.context_to_tag_mapping:\n for field, f_string in self.context_to_tag_mapping.items():\n try:\n value = format_index_expr(f_string, self.data)\n except KeyError:\n continue\n if value:\n if not field:\n yield (self.alias, value)\n else:\n yield (f\"{self.alias}.{field}\", value)\n\n\n# TODO(dcramer): contexts need to document/describe expected (optional) fields\n@contexttype\nclass DefaultContextType(ContextType):\n type = \"default\"\n\n\n@contexttype\nclass AppContextType(ContextType):\n type = \"app\"\n context_to_tag_mapping = {\"device\": \"{device_app_hash}\"}\n\n\n@contexttype\nclass DeviceContextType(ContextType):\n type = \"device\"\n context_to_tag_mapping = {\"\": \"{model}\", \"family\": \"{family}\"}\n # model_id, arch\n\n\n@contexttype\nclass RuntimeContextType(ContextType):\n type = \"runtime\"\n context_to_tag_mapping = {\"\": \"{name} {version}\", \"name\": \"{name}\"}\n\n\n@contexttype\nclass BrowserContextType(ContextType):\n type = \"browser\"\n context_to_tag_mapping = {\"\": \"{name} {version}\", \"name\": \"{name}\"}\n # viewport\n\n\n@contexttype\nclass OsContextType(ContextType):\n type = \"os\"\n context_to_tag_mapping = {\"\": \"{name} {version}\", \"name\": \"{name}\", \"rooted\": \"{rooted}\"}\n # build, rooted\n\n\n@contexttype\nclass GpuContextType(ContextType):\n 
type = \"gpu\"\n context_to_tag_mapping = {\"name\": \"{name}\", \"vendor\": \"{vendor_name}\"}\n\n\n@contexttype\nclass MonitorContextType(ContextType):\n type = \"monitor\"\n context_to_tag_mapping = {\"id\": \"{id}\", \"slug\": \"{slug}\"}\n\n\n@contexttype\nclass TraceContextType(ContextType):\n type = \"trace\"\n context_to_tag_mapping = {}\n\n\n@contexttype\nclass OtelContextType(ContextType):\n type = \"otel\"\n context_to_tag_mapping = {}\n\n\nclass Contexts(Interface):\n \"\"\"\n This interface stores context specific information.\n \"\"\"\n\n display_score = 1100\n score = 800\n\n @classmethod\n def to_python(cls, data, **kwargs):\n rv = {}\n\n # Note the alias is the key of the context entry\n for alias, value in data.items():\n # XXX(markus): The `None`-case should be handled in the UI and\n # other consumers of this interface\n if value is not None:\n rv[alias] = cls.normalize_context(alias, value)\n\n return super().to_python(rv, **kwargs)\n\n @classmethod\n def normalize_context(cls, alias, data):\n ctx_type = data.get(\"type\", alias)\n ctx_cls = context_types.get(ctx_type, DefaultContextType)\n return ctx_cls(alias, data)\n\n def iter_contexts(self):\n return self._data.values()\n\n def to_json(self):\n rv = {}\n for alias, inst in self._data.items():\n rv[alias] = inst.to_json()\n return rv\n\n def iter_tags(self):\n for inst in self.iter_contexts():\n yield from inst.iter_tags()\n", "path": "src/sentry/interfaces/contexts.py"}]} | 2,783 | 153 |
gh_patches_debug_29778 | rasdani/github-patches | git_diff | vaexio__vaex-1027 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow lazy file opening for parquet formatted files
When I run `open` on an HDF5 file, the operation is done lazily – I believe that file handlers are opened but nothing is loaded into RAM. However running `open` on a parquet file results in eager evaluation, where data is immediately loaded into RAM. (Same goes for `open_many`, FWIW). This means that it's impossible to use vaex on large-than-RAM datasets if they're stored as parquet.
I've put a minimal working example below, although note it'll use like 3-6GB of RAM and dump like 5 GB to your HD:
```python
import numpy as np
import pandas as pd
import time
import vaex
# Creates a roughly 2.4 GB dataframe
pandas_df = pd.DataFrame({
"numeric_1": np.random.rand(100000000),
"numeric_2": np.random.rand(100000000),
"numeric_3": np.random.rand(100000000)
})
# Saves to parquet and vaex-formatted hdf5
pandas_df.to_parquet("temp_df.parquet", index=False)
vaex_df = vaex.from_pandas(pandas_df)
vaex_df.export_hdf5("temp_df.hdf5")
del vaex_df
del pandas_df
start = time.time()
vaex_df_hdf5 = vaex.open("temp_df.hdf5")
print(f"Took {time.time() - start:.2f} seconds to open the hdf5 file")
start = time.time()
vaex_df_parquet = vaex.open("temp_df.parquet")
print(f"Took {time.time() - start:.2f} seconds to open the parquet file")
```
Output on my machine:
```
Took 0.01 seconds to open the hdf5 file
Took 7.89 seconds to open the parquet file
```
(You can also see that RAM usage is up, although printing that to screen from python is trickier 😄.)
I have no idea if it's possible, but it would be really awesome to get the same lazy opening for parquet files – A fair amount of the files I interact with are stored as parquet, and converting to HDF5 would be time consuming and annoying.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/vaex-core/vaex/arrow/dataset.py`
Content:
```
1 __author__ = 'maartenbreddels'
2 import collections
3 import concurrent.futures
4 import logging
5 import multiprocessing
6 import os
7
8 import pyarrow as pa
9 import pyarrow.dataset
10
11 import vaex.dataset
12 import vaex.file.other
13 from ..itertools import buffer
14
15
16 logger = logging.getLogger("vaex.arrow.dataset")
17
18 thread_count_default_io = os.environ.get('VAEX_NUM_THREADS_IO', multiprocessing.cpu_count() * 2 + 1)
19 thread_count_default_io = int(thread_count_default_io)
20 main_io_pool = None
21
22 logger = logging.getLogger("vaex.multithreading")
23
24
25 def get_main_io_pool():
26 global main_io_pool
27 if main_io_pool is None:
28 main_io_pool = concurrent.futures.ThreadPoolExecutor(max_workers=thread_count_default_io)
29 return main_io_pool
30
31
32 class DatasetArrow(vaex.dataset.Dataset):
33 def __init__(self, ds):
34 super().__init__()
35 self._arrow_ds = ds
36 row_count = 0
37 for fragment in self._arrow_ds.get_fragments():
38 if hasattr(fragment, "ensure_complete_metadata"):
39 fragment.ensure_complete_metadata()
40 for rg in fragment.row_groups:
41 row_count += rg.num_rows
42 self._row_count = row_count
43 self._columns = {name: vaex.dataset.ColumnProxy(self, name, type) for name, type in
44 zip(self._arrow_ds.schema.names, self._arrow_ds.schema.types)}
45 self._ids = {}
46
47 def hashed(self):
48 raise NotImplementedError
49
50 def slice(self, start, end):
51 # TODO: we can be smarter here, and trim off some fragments
52 if start == 0 and end == self.row_count:
53 return self
54 return vaex.dataset.DatasetSliced(self, start=start, end=end)
55
56 def is_masked(self, column):
57 return False
58
59 def shape(self, column):
60 return tuple()
61
62 def __getitem__(self, item):
63 if isinstance(item, slice):
64 assert item.step in [1, None]
65 return vaex.dataset.DatasetSliced(self, item.start or 0, item.stop or self.row_count)
66 return self._columns[item]
67
68 def close(self):
69 # no need to close it, it seem
70 pass
71
72 def _chunk_producer(self, columns, chunk_size=None, reverse=False, start=0, end=None):
73 pool = get_main_io_pool()
74 offset = 0
75 for fragment_large in self._arrow_ds.get_fragments():
76 fragment_large_rows = sum([rg.num_rows for rg in fragment_large.row_groups])
77 # when do we want to split up? File size? max chunk size?
78 # if fragment_large_rows > chunk_size:
79 # fragments = fragment_large.split_by_row_group()
80 # else:
81 # # or not
82 # fragments = [fragment_large]
83 import pyarrow.parquet
84 fragments = [fragment_large]
85 for fragment in fragments:
86 rows = sum([rg.num_rows for rg in fragment.row_groups])
87 chunk_start = offset
88 chunk_end = offset + rows
89
90 length = chunk_end - chunk_start # default length
91
92 if start >= chunk_end: # we didn't find the beginning yet
93 offset += length
94 continue
95 if end < chunk_start: # we are past the end
96 # assert False
97 break
98 def reader(fragment=fragment):
99 table = fragment.to_table(columns=columns, use_threads=False)
100 chunks = dict(zip(table.column_names, table.columns))
101 return chunks
102
103 if start > chunk_start:
104 # this means we have to cut off a piece of the beginning
105 if end < chunk_end:
106 # AND the end
107 length = end - chunk_start # without the start cut off
108 length -= start - chunk_start # correcting for the start cut off
109 def slicer(chunk_start=chunk_start, reader=reader, length=length):
110 chunks = reader()
111 chunks = {name: ar.slice(start - chunk_start, length) for name, ar in chunks.items()}
112 for name, ar in chunks.items():
113 assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'
114 return chunks
115 reader = slicer
116 else:
117 length -= start - chunk_start # correcting for the start cut off
118 def slicer(chunk_start=chunk_start, reader=reader, length=length):
119 chunks = reader()
120 chunks = {name: ar.slice(start - chunk_start) for name, ar in chunks.items()}
121 for name, ar in chunks.items():
122 assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'
123 return chunks
124 reader = slicer
125 else:
126 if end < chunk_end:
127 # we only need to cut off a piece of the end
128 length = end - chunk_start
129 def slicer(chunk_start=chunk_start, reader=reader, length=length):
130 chunks = reader()
131 chunks = {name: ar.slice(0, length) for name, ar in chunks.items()}
132 for name, ar in chunks.items():
133 assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'
134 return chunks
135 reader = slicer
136 offset += rows
137 yield pool.submit(reader)
138
139 def chunk_iterator(self, columns, chunk_size=None, reverse=False, start=0, end=None):
140 chunk_size = chunk_size or 1024*1024
141 i1 = 0
142 chunks_ready_list = []
143 i1 = i2 = 0
144
145 for chunks_future in buffer(self._chunk_producer(columns, chunk_size, start=start, end=end or self._row_count), thread_count_default_io+3):
146 chunks = chunks_future.result()
147 chunks_ready_list.append(chunks)
148 total_row_count = sum([len(list(k.values())[0]) for k in chunks_ready_list])
149 if total_row_count > chunk_size:
150 chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)
151 i2 += current_row_count
152 yield i1, i2, vaex.dataset._concat_chunk_list(chunks_current_list)
153 i1 = i2
154
155 while chunks_ready_list:
156 chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)
157 i2 += current_row_count
158 yield i1, i2, vaex.dataset._concat_chunk_list(chunks_current_list)
159 i1 = i2
160
161
162
163 def from_table(table, as_numpy=False):
164 columns = dict(zip(table.schema.names, table.columns))
165 # TODO: this should be an DatasetArrow and/or DatasetParquet
166 dataset = vaex.dataset.DatasetArrays(columns)
167 df = vaex.dataframe.DataFrameLocal(dataset)
168 return df.as_numpy() if as_numpy else df
169
170
171 def open(filename, as_numpy=False):
172 source = pa.memory_map(filename)
173 try:
174 # first we try if it opens as stream
175 reader = pa.ipc.open_stream(source)
176 except pa.lib.ArrowInvalid:
177 # if not, we open as file
178 reader = pa.ipc.open_file(source)
179 # for some reason this reader is not iterable
180 batches = [reader.get_batch(i) for i in range(reader.num_record_batches)]
181 else:
182 # if a stream, we're good
183 batches = reader # this reader is iterable
184 table = pa.Table.from_batches(batches)
185 return from_table(table, as_numpy=as_numpy)
186
187
188 def open_parquet(filename, as_numpy=False):
189 arrow_ds = pyarrow.dataset.dataset(filename)
190 ds = DatasetArrow(arrow_ds)
191 return vaex.from_dataset(ds)
192
193 # vaex.file.other.dataset_type_map["arrow"] = DatasetArrow
194 # vaex.file.other.dataset_type_map["parquet"] = DatasetParquet
195
196
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packages/vaex-core/vaex/arrow/dataset.py b/packages/vaex-core/vaex/arrow/dataset.py
--- a/packages/vaex-core/vaex/arrow/dataset.py
+++ b/packages/vaex-core/vaex/arrow/dataset.py
@@ -30,8 +30,9 @@
class DatasetArrow(vaex.dataset.Dataset):
- def __init__(self, ds):
+ def __init__(self, ds, max_rows_read=1024**2*10):
super().__init__()
+ self.max_rows_read = max_rows_read
self._arrow_ds = ds
row_count = 0
for fragment in self._arrow_ds.get_fragments():
@@ -70,18 +71,15 @@
pass
def _chunk_producer(self, columns, chunk_size=None, reverse=False, start=0, end=None):
+ import pyarrow.parquet
pool = get_main_io_pool()
offset = 0
for fragment_large in self._arrow_ds.get_fragments():
fragment_large_rows = sum([rg.num_rows for rg in fragment_large.row_groups])
- # when do we want to split up? File size? max chunk size?
- # if fragment_large_rows > chunk_size:
- # fragments = fragment_large.split_by_row_group()
- # else:
- # # or not
- # fragments = [fragment_large]
- import pyarrow.parquet
fragments = [fragment_large]
+ # when do we want to split up? File size? max chunk size?
+ if fragment_large_rows > self.max_rows_read:
+ fragments = fragment_large.split_by_row_group()
for fragment in fragments:
rows = sum([rg.num_rows for rg in fragment.row_groups])
chunk_start = offset
| {"golden_diff": "diff --git a/packages/vaex-core/vaex/arrow/dataset.py b/packages/vaex-core/vaex/arrow/dataset.py\n--- a/packages/vaex-core/vaex/arrow/dataset.py\n+++ b/packages/vaex-core/vaex/arrow/dataset.py\n@@ -30,8 +30,9 @@\n \n \n class DatasetArrow(vaex.dataset.Dataset):\n- def __init__(self, ds):\n+ def __init__(self, ds, max_rows_read=1024**2*10):\n super().__init__()\n+ self.max_rows_read = max_rows_read\n self._arrow_ds = ds\n row_count = 0\n for fragment in self._arrow_ds.get_fragments():\n@@ -70,18 +71,15 @@\n pass\n \n def _chunk_producer(self, columns, chunk_size=None, reverse=False, start=0, end=None):\n+ import pyarrow.parquet\n pool = get_main_io_pool()\n offset = 0\n for fragment_large in self._arrow_ds.get_fragments():\n fragment_large_rows = sum([rg.num_rows for rg in fragment_large.row_groups])\n- # when do we want to split up? File size? max chunk size?\n- # if fragment_large_rows > chunk_size:\n- # fragments = fragment_large.split_by_row_group()\n- # else:\n- # # or not\n- # fragments = [fragment_large]\n- import pyarrow.parquet\n fragments = [fragment_large]\n+ # when do we want to split up? File size? max chunk size?\n+ if fragment_large_rows > self.max_rows_read:\n+ fragments = fragment_large.split_by_row_group()\n for fragment in fragments:\n rows = sum([rg.num_rows for rg in fragment.row_groups])\n chunk_start = offset\n", "issue": "Allow lazy file opening for parquet formatted files\nWhen I run `open` on an HDF5 file, the operation is done lazily \u2013\u00a0I believe that file handlers are opened but nothing is loaded into RAM. However running `open` on a parquet file results in eager evaluation, where data is immediately loaded into RAM. (Same goes for `open_many`, FWIW). This means that it's impossible to use vaex on large-than-RAM datasets if they're stored as parquet.\r\n\r\nI've put a minimal working example below, although note it'll use like 3-6GB of RAM and dump like 5 GB to your HD:\r\n\r\n```python\r\nimport numpy as np\r\nimport pandas as pd\r\nimport time\r\nimport vaex\r\n\r\n# Creates a roughly 2.4 GB dataframe\r\npandas_df = pd.DataFrame({\r\n \"numeric_1\": np.random.rand(100000000),\r\n \"numeric_2\": np.random.rand(100000000),\r\n \"numeric_3\": np.random.rand(100000000)\r\n})\r\n\r\n# Saves to parquet and vaex-formatted hdf5\r\npandas_df.to_parquet(\"temp_df.parquet\", index=False)\r\nvaex_df = vaex.from_pandas(pandas_df)\r\nvaex_df.export_hdf5(\"temp_df.hdf5\")\r\n\r\ndel vaex_df\r\ndel pandas_df\r\n\r\nstart = time.time()\r\nvaex_df_hdf5 = vaex.open(\"temp_df.hdf5\")\r\nprint(f\"Took {time.time() - start:.2f} seconds to open the hdf5 file\")\r\n\r\nstart = time.time()\r\nvaex_df_parquet = vaex.open(\"temp_df.parquet\")\r\nprint(f\"Took {time.time() - start:.2f} seconds to open the parquet file\")\r\n```\r\n\r\nOutput on my machine:\r\n```\r\nTook 0.01 seconds to open the hdf5 file\r\nTook 7.89 seconds to open the parquet file\r\n```\r\n\r\n(You can also see that RAM usage is up, although printing that to screen from python is trickier \ud83d\ude04.)\r\n\r\nI have no idea if it's possible, but it would be really awesome to get the same lazy opening for parquet files \u2013\u00a0A fair amount of the files I interact with are stored as parquet, and converting to HDF5 would be time consuming and annoying. 
\n", "before_files": [{"content": "__author__ = 'maartenbreddels'\nimport collections\nimport concurrent.futures\nimport logging\nimport multiprocessing\nimport os\n\nimport pyarrow as pa\nimport pyarrow.dataset\n\nimport vaex.dataset\nimport vaex.file.other\nfrom ..itertools import buffer\n\n\nlogger = logging.getLogger(\"vaex.arrow.dataset\")\n\nthread_count_default_io = os.environ.get('VAEX_NUM_THREADS_IO', multiprocessing.cpu_count() * 2 + 1)\nthread_count_default_io = int(thread_count_default_io)\nmain_io_pool = None\n\nlogger = logging.getLogger(\"vaex.multithreading\")\n\n\ndef get_main_io_pool():\n global main_io_pool\n if main_io_pool is None:\n main_io_pool = concurrent.futures.ThreadPoolExecutor(max_workers=thread_count_default_io)\n return main_io_pool\n\n\nclass DatasetArrow(vaex.dataset.Dataset):\n def __init__(self, ds):\n super().__init__()\n self._arrow_ds = ds\n row_count = 0\n for fragment in self._arrow_ds.get_fragments():\n if hasattr(fragment, \"ensure_complete_metadata\"):\n fragment.ensure_complete_metadata()\n for rg in fragment.row_groups:\n row_count += rg.num_rows\n self._row_count = row_count\n self._columns = {name: vaex.dataset.ColumnProxy(self, name, type) for name, type in\n zip(self._arrow_ds.schema.names, self._arrow_ds.schema.types)}\n self._ids = {}\n\n def hashed(self):\n raise NotImplementedError\n\n def slice(self, start, end):\n # TODO: we can be smarter here, and trim off some fragments\n if start == 0 and end == self.row_count:\n return self\n return vaex.dataset.DatasetSliced(self, start=start, end=end)\n\n def is_masked(self, column):\n return False\n\n def shape(self, column):\n return tuple()\n\n def __getitem__(self, item):\n if isinstance(item, slice):\n assert item.step in [1, None]\n return vaex.dataset.DatasetSliced(self, item.start or 0, item.stop or self.row_count)\n return self._columns[item]\n\n def close(self):\n # no need to close it, it seem\n pass\n\n def _chunk_producer(self, columns, chunk_size=None, reverse=False, start=0, end=None):\n pool = get_main_io_pool()\n offset = 0\n for fragment_large in self._arrow_ds.get_fragments():\n fragment_large_rows = sum([rg.num_rows for rg in fragment_large.row_groups])\n # when do we want to split up? File size? 
max chunk size?\n # if fragment_large_rows > chunk_size:\n # fragments = fragment_large.split_by_row_group()\n # else:\n # # or not\n # fragments = [fragment_large]\n import pyarrow.parquet\n fragments = [fragment_large]\n for fragment in fragments:\n rows = sum([rg.num_rows for rg in fragment.row_groups])\n chunk_start = offset\n chunk_end = offset + rows\n\n length = chunk_end - chunk_start # default length\n\n if start >= chunk_end: # we didn't find the beginning yet\n offset += length\n continue\n if end < chunk_start: # we are past the end\n # assert False\n break\n def reader(fragment=fragment):\n table = fragment.to_table(columns=columns, use_threads=False)\n chunks = dict(zip(table.column_names, table.columns))\n return chunks\n\n if start > chunk_start:\n # this means we have to cut off a piece of the beginning\n if end < chunk_end:\n # AND the end\n length = end - chunk_start # without the start cut off\n length -= start - chunk_start # correcting for the start cut off\n def slicer(chunk_start=chunk_start, reader=reader, length=length):\n chunks = reader()\n chunks = {name: ar.slice(start - chunk_start, length) for name, ar in chunks.items()}\n for name, ar in chunks.items():\n assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'\n return chunks\n reader = slicer\n else:\n length -= start - chunk_start # correcting for the start cut off\n def slicer(chunk_start=chunk_start, reader=reader, length=length):\n chunks = reader()\n chunks = {name: ar.slice(start - chunk_start) for name, ar in chunks.items()}\n for name, ar in chunks.items():\n assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'\n return chunks\n reader = slicer\n else:\n if end < chunk_end:\n # we only need to cut off a piece of the end\n length = end - chunk_start\n def slicer(chunk_start=chunk_start, reader=reader, length=length):\n chunks = reader()\n chunks = {name: ar.slice(0, length) for name, ar in chunks.items()}\n for name, ar in chunks.items():\n assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'\n return chunks\n reader = slicer\n offset += rows\n yield pool.submit(reader)\n\n def chunk_iterator(self, columns, chunk_size=None, reverse=False, start=0, end=None):\n chunk_size = chunk_size or 1024*1024\n i1 = 0\n chunks_ready_list = []\n i1 = i2 = 0\n\n for chunks_future in buffer(self._chunk_producer(columns, chunk_size, start=start, end=end or self._row_count), thread_count_default_io+3):\n chunks = chunks_future.result()\n chunks_ready_list.append(chunks)\n total_row_count = sum([len(list(k.values())[0]) for k in chunks_ready_list])\n if total_row_count > chunk_size:\n chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)\n i2 += current_row_count\n yield i1, i2, vaex.dataset._concat_chunk_list(chunks_current_list)\n i1 = i2\n\n while chunks_ready_list:\n chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)\n i2 += current_row_count\n yield i1, i2, vaex.dataset._concat_chunk_list(chunks_current_list)\n i1 = i2\n\n\n\ndef from_table(table, as_numpy=False):\n columns = dict(zip(table.schema.names, table.columns))\n # TODO: this should be an DatasetArrow and/or DatasetParquet\n dataset = vaex.dataset.DatasetArrays(columns)\n df = vaex.dataframe.DataFrameLocal(dataset)\n return df.as_numpy() if as_numpy else df\n\n\ndef open(filename, as_numpy=False):\n source = 
pa.memory_map(filename)\n try:\n # first we try if it opens as stream\n reader = pa.ipc.open_stream(source)\n except pa.lib.ArrowInvalid:\n # if not, we open as file\n reader = pa.ipc.open_file(source)\n # for some reason this reader is not iterable\n batches = [reader.get_batch(i) for i in range(reader.num_record_batches)]\n else:\n # if a stream, we're good\n batches = reader # this reader is iterable\n table = pa.Table.from_batches(batches)\n return from_table(table, as_numpy=as_numpy)\n\n\ndef open_parquet(filename, as_numpy=False):\n arrow_ds = pyarrow.dataset.dataset(filename)\n ds = DatasetArrow(arrow_ds)\n return vaex.from_dataset(ds)\n\n# vaex.file.other.dataset_type_map[\"arrow\"] = DatasetArrow\n# vaex.file.other.dataset_type_map[\"parquet\"] = DatasetParquet\n\n", "path": "packages/vaex-core/vaex/arrow/dataset.py"}], "after_files": [{"content": "__author__ = 'maartenbreddels'\nimport collections\nimport concurrent.futures\nimport logging\nimport multiprocessing\nimport os\n\nimport pyarrow as pa\nimport pyarrow.dataset\n\nimport vaex.dataset\nimport vaex.file.other\nfrom ..itertools import buffer\n\n\nlogger = logging.getLogger(\"vaex.arrow.dataset\")\n\nthread_count_default_io = os.environ.get('VAEX_NUM_THREADS_IO', multiprocessing.cpu_count() * 2 + 1)\nthread_count_default_io = int(thread_count_default_io)\nmain_io_pool = None\n\nlogger = logging.getLogger(\"vaex.multithreading\")\n\n\ndef get_main_io_pool():\n global main_io_pool\n if main_io_pool is None:\n main_io_pool = concurrent.futures.ThreadPoolExecutor(max_workers=thread_count_default_io)\n return main_io_pool\n\n\nclass DatasetArrow(vaex.dataset.Dataset):\n def __init__(self, ds, max_rows_read=1024**2*10):\n super().__init__()\n self.max_rows_read = max_rows_read\n self._arrow_ds = ds\n row_count = 0\n for fragment in self._arrow_ds.get_fragments():\n if hasattr(fragment, \"ensure_complete_metadata\"):\n fragment.ensure_complete_metadata()\n for rg in fragment.row_groups:\n row_count += rg.num_rows\n self._row_count = row_count\n self._columns = {name: vaex.dataset.ColumnProxy(self, name, type) for name, type in\n zip(self._arrow_ds.schema.names, self._arrow_ds.schema.types)}\n self._ids = {}\n\n def hashed(self):\n raise NotImplementedError\n\n def slice(self, start, end):\n # TODO: we can be smarter here, and trim off some fragments\n if start == 0 and end == self.row_count:\n return self\n return vaex.dataset.DatasetSliced(self, start=start, end=end)\n\n def is_masked(self, column):\n return False\n\n def shape(self, column):\n return tuple()\n\n def __getitem__(self, item):\n if isinstance(item, slice):\n assert item.step in [1, None]\n return vaex.dataset.DatasetSliced(self, item.start or 0, item.stop or self.row_count)\n return self._columns[item]\n\n def close(self):\n # no need to close it, it seem\n pass\n\n def _chunk_producer(self, columns, chunk_size=None, reverse=False, start=0, end=None):\n import pyarrow.parquet\n pool = get_main_io_pool()\n offset = 0\n for fragment_large in self._arrow_ds.get_fragments():\n fragment_large_rows = sum([rg.num_rows for rg in fragment_large.row_groups])\n fragments = [fragment_large]\n # when do we want to split up? File size? 
max chunk size?\n if fragment_large_rows > self.max_rows_read:\n fragments = fragment_large.split_by_row_group()\n for fragment in fragments:\n rows = sum([rg.num_rows for rg in fragment.row_groups])\n chunk_start = offset\n chunk_end = offset + rows\n\n length = chunk_end - chunk_start # default length\n\n if start >= chunk_end: # we didn't find the beginning yet\n offset += length\n continue\n if end < chunk_start: # we are past the end\n # assert False\n break\n def reader(fragment=fragment):\n table = fragment.to_table(columns=columns, use_threads=False)\n chunks = dict(zip(table.column_names, table.columns))\n return chunks\n\n if start > chunk_start:\n # this means we have to cut off a piece of the beginning\n if end < chunk_end:\n # AND the end\n length = end - chunk_start # without the start cut off\n length -= start - chunk_start # correcting for the start cut off\n def slicer(chunk_start=chunk_start, reader=reader, length=length):\n chunks = reader()\n chunks = {name: ar.slice(start - chunk_start, length) for name, ar in chunks.items()}\n for name, ar in chunks.items():\n assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'\n return chunks\n reader = slicer\n else:\n length -= start - chunk_start # correcting for the start cut off\n def slicer(chunk_start=chunk_start, reader=reader, length=length):\n chunks = reader()\n chunks = {name: ar.slice(start - chunk_start) for name, ar in chunks.items()}\n for name, ar in chunks.items():\n assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'\n return chunks\n reader = slicer\n else:\n if end < chunk_end:\n # we only need to cut off a piece of the end\n length = end - chunk_start\n def slicer(chunk_start=chunk_start, reader=reader, length=length):\n chunks = reader()\n chunks = {name: ar.slice(0, length) for name, ar in chunks.items()}\n for name, ar in chunks.items():\n assert len(ar) == length, f'Oops, array was expected to be of length {length} but was {len(ar)}'\n return chunks\n reader = slicer\n offset += rows\n yield pool.submit(reader)\n\n def chunk_iterator(self, columns, chunk_size=None, reverse=False, start=0, end=None):\n chunk_size = chunk_size or 1024*1024\n i1 = 0\n chunks_ready_list = []\n i1 = i2 = 0\n\n for chunks_future in buffer(self._chunk_producer(columns, chunk_size, start=start, end=end or self._row_count), thread_count_default_io+3):\n chunks = chunks_future.result()\n chunks_ready_list.append(chunks)\n total_row_count = sum([len(list(k.values())[0]) for k in chunks_ready_list])\n if total_row_count > chunk_size:\n chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)\n i2 += current_row_count\n yield i1, i2, vaex.dataset._concat_chunk_list(chunks_current_list)\n i1 = i2\n\n while chunks_ready_list:\n chunks_current_list, current_row_count = vaex.dataset._slice_of_chunks(chunks_ready_list, chunk_size)\n i2 += current_row_count\n yield i1, i2, vaex.dataset._concat_chunk_list(chunks_current_list)\n i1 = i2\n\n\n\ndef from_table(table, as_numpy=False):\n columns = dict(zip(table.schema.names, table.columns))\n # TODO: this should be an DatasetArrow and/or DatasetParquet\n dataset = vaex.dataset.DatasetArrays(columns)\n df = vaex.dataframe.DataFrameLocal(dataset)\n return df.as_numpy() if as_numpy else df\n\n\ndef open(filename, as_numpy=False):\n source = pa.memory_map(filename)\n try:\n # first we try if it opens as stream\n reader = pa.ipc.open_stream(source)\n except 
pa.lib.ArrowInvalid:\n # if not, we open as file\n reader = pa.ipc.open_file(source)\n # for some reason this reader is not iterable\n batches = [reader.get_batch(i) for i in range(reader.num_record_batches)]\n else:\n # if a stream, we're good\n batches = reader # this reader is iterable\n table = pa.Table.from_batches(batches)\n return from_table(table, as_numpy=as_numpy)\n\n\ndef open_parquet(filename, as_numpy=False):\n arrow_ds = pyarrow.dataset.dataset(filename)\n ds = DatasetArrow(arrow_ds)\n return vaex.from_dataset(ds)\n\n# vaex.file.other.dataset_type_map[\"arrow\"] = DatasetArrow\n# vaex.file.other.dataset_type_map[\"parquet\"] = DatasetParquet\n\n", "path": "packages/vaex-core/vaex/arrow/dataset.py"}]} | 2,978 | 404 |
gh_patches_debug_7686 | rasdani/github-patches | git_diff | ivy-llc__ivy-22038 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
max_pool3d
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ivy/functional/frontends/torch/nn/functional/pooling_functions.py`
Content:
```
1 # global
2 from functools import reduce
3
4 # local
5 import ivy
6 from ivy import with_unsupported_dtypes
7 from ivy.functional.frontends.torch.func_wrapper import (
8 to_ivy_arrays_and_back,
9 )
10
11
12 # --- Helpers --- #
13 # --------------- #
14
15
16 def _broadcast_pooling_helper(x, pool_dims: str = "2d", name: str = "padding"):
17 dims = {"1d": 1, "2d": 2, "3d": 3}
18
19 if isinstance(x, int):
20 return tuple([x for _ in range(dims[pool_dims])])
21
22 if len(x) == 1:
23 return tuple([x[0] for _ in range(dims[pool_dims])])
24 elif len(x) == dims[pool_dims]:
25 return tuple(x)
26 elif len(x) != dims[pool_dims]:
27 raise ValueError(
28 f"`{name}` must either be a single int, "
29 f"or a tuple of {dims[pool_dims]} ints. "
30 )
31
32
33 # --- Main --- #
34 # ------------ #
35
36
37 @with_unsupported_dtypes(
38 {
39 "2.0.1 and below": (
40 "bfloat16",
41 "float16",
42 )
43 },
44 "torch",
45 )
46 @to_ivy_arrays_and_back
47 def adaptive_avg_pool1d(input, output_size):
48 return ivy.adaptive_avg_pool1d(input, output_size)
49
50
51 @with_unsupported_dtypes(
52 {
53 "2.0.1 and below": (
54 "float16",
55 "bfloat16",
56 )
57 },
58 "torch",
59 )
60 @to_ivy_arrays_and_back
61 def adaptive_avg_pool2d(input, output_size):
62 return ivy.adaptive_avg_pool2d(input, output_size)
63
64
65 @to_ivy_arrays_and_back
66 def adaptive_max_pool2d(
67 input,
68 output_size,
69 return_indices=False,
70 ):
71 # ToDo: Add return_indices once superset is implemented
72 return ivy.adaptive_max_pool2d(input, output_size)
73
74
75 @to_ivy_arrays_and_back
76 def avg_pool1d(
77 input,
78 kernel_size,
79 stride=None,
80 padding=0,
81 ceil_mode=False,
82 count_include_pad=True,
83 ):
84 if stride is None:
85 stride = kernel_size
86 data_format = "NCW"
87 # TODO: remove the broadcasting and padding string specification when ivy.avg_pool
88 # support explicit padding
89 kernel_size = _broadcast_pooling_helper(kernel_size, "1d", name="kernel_size")
90 padding = _broadcast_pooling_helper(padding, "1d", name="padding")
91 if all(
92 [pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)]
93 ):
94 padding = "SAME"
95 else:
96 padding = "VALID"
97 return ivy.avg_pool1d(
98 input,
99 kernel_size,
100 stride,
101 padding,
102 data_format=data_format,
103 count_include_pad=count_include_pad,
104 ceil_mode=ceil_mode,
105 )
106
107
108 @to_ivy_arrays_and_back
109 def avg_pool2d(
110 input,
111 kernel_size,
112 stride=None,
113 padding=0,
114 ceil_mode=False,
115 count_include_pad=True,
116 divisor_override=None,
117 ):
118 if stride is None:
119 stride = kernel_size
120 data_format = "NCHW"
121 # TODO: remove the broadcasting and padding string specification when ivy.avg_pool
122 # support explicit padding
123 kernel_size = _broadcast_pooling_helper(kernel_size, "2d", name="kernel_size")
124 padding = _broadcast_pooling_helper(padding, "2d", name="padding")
125 if all(
126 [pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)]
127 ):
128 padding = "SAME"
129 else:
130 padding = "VALID"
131 return ivy.avg_pool2d(
132 input,
133 kernel_size,
134 stride,
135 padding,
136 data_format=data_format,
137 ceil_mode=ceil_mode,
138 count_include_pad=count_include_pad,
139 divisor_override=divisor_override,
140 )
141
142
143 @to_ivy_arrays_and_back
144 def avg_pool3d(
145 input,
146 kernel_size,
147 stride=None,
148 padding=0,
149 ceil_mode=False,
150 count_include_pad=True,
151 divisor_override=None,
152 ):
153 if stride is None:
154 stride = kernel_size
155 # TODO: remove the broadcasting and padding string specification when ivy.avg_pool
156 # support explicit padding
157 kernel_size = _broadcast_pooling_helper(kernel_size, "3d", name="kernel_size")
158 padding = _broadcast_pooling_helper(padding, "3d", name="padding")
159 if all(
160 [pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)]
161 ):
162 padding = "SAME"
163 else:
164 padding = "VALID"
165 return ivy.avg_pool3d(
166 input,
167 kernel_size,
168 stride,
169 padding,
170 data_format="NCDHW",
171 ceil_mode=ceil_mode,
172 count_include_pad=count_include_pad,
173 divisor_override=divisor_override,
174 )
175
176
177 @with_unsupported_dtypes(
178 {
179 "2.0.1 and below": (
180 "float16",
181 "bfloat16",
182 )
183 },
184 "torch",
185 )
186 @to_ivy_arrays_and_back
187 def lp_pool1d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
188 data_format = "NCW"
189 padding = "VALID"
190 if stride is None:
191 stride = kernel_size
192 if not isinstance(kernel_size, int):
193 kernel_mul = reduce(lambda x, y: x * y, kernel_size)
194 else:
195 kernel_mul = kernel_size
196
197 out = ivy.avg_pool1d(
198 ivy.pow(input, norm_type),
199 kernel_size,
200 stride,
201 padding,
202 data_format=data_format,
203 ceil_mode=ceil_mode,
204 )
205 p = 1.0 / norm_type if norm_type != 0 else 1.0
206 return ivy.pow(ivy.multiply(out, kernel_mul), p)
207
208
209 @to_ivy_arrays_and_back
210 def lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):
211 data_format = "NCHW"
212 padding = "VALID"
213 if stride is None:
214 stride = kernel_size
215 out = ivy.avg_pool2d(
216 ivy.pow(input, norm_type),
217 kernel_size,
218 stride,
219 padding,
220 data_format=data_format,
221 ceil_mode=ceil_mode,
222 )
223 if not isinstance(kernel_size, int):
224 kernel_mul = reduce(lambda x, y: x * y, kernel_size)
225 else:
226 kernel_mul = kernel_size
227 p = ivy.divide(1.0, norm_type) if norm_type != 0 else 1.0
228 return ivy.pow(ivy.multiply(out, kernel_mul), p).astype(input.dtype)
229
230
231 @to_ivy_arrays_and_back
232 def max_pool1d(
233 input,
234 kernel_size,
235 stride=None,
236 padding=0,
237 ceil_mode=False,
238 dilation=1,
239 return_indices=False,
240 ):
241 if stride is None:
242 stride = kernel_size
243 data_format = "NCW"
244 return ivy.max_pool1d(
245 input,
246 kernel_size,
247 stride,
248 padding,
249 data_format=data_format,
250 dilation=dilation,
251 ceil_mode=ceil_mode,
252 )
253
254
255 @with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, "torch")
256 @to_ivy_arrays_and_back
257 def max_pool2d(
258 input,
259 kernel_size,
260 stride=None,
261 padding=0,
262 dilation=1,
263 ceil_mode=False,
264 return_indices=False,
265 ):
266 if stride is None:
267 stride = kernel_size
268 return ivy.max_pool2d(
269 input,
270 kernel_size,
271 stride,
272 padding,
273 data_format="NCHW",
274 dilation=dilation,
275 ceil_mode=ceil_mode,
276 )
277
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ivy/functional/frontends/torch/nn/functional/pooling_functions.py b/ivy/functional/frontends/torch/nn/functional/pooling_functions.py
--- a/ivy/functional/frontends/torch/nn/functional/pooling_functions.py
+++ b/ivy/functional/frontends/torch/nn/functional/pooling_functions.py
@@ -274,3 +274,28 @@
dilation=dilation,
ceil_mode=ceil_mode,
)
+
+
+@with_unsupported_dtypes({"2.0.1 and below": ("float16",)}, "torch")
+@to_ivy_arrays_and_back
+def max_pool3d(
+ input,
+ kernel_size,
+ stride=None,
+ padding=0,
+ dilation=1,
+ ceil_mode=False,
+ return_indices=False,
+):
+ if stride is None:
+ stride = kernel_size
+
+ return ivy.max_pool3d(
+ input,
+ kernel_size,
+ stride,
+ padding,
+ data_format="NCDHW",
+ dilation=dilation,
+ ceil_mode=ceil_mode,
+ )
| {"golden_diff": "diff --git a/ivy/functional/frontends/torch/nn/functional/pooling_functions.py b/ivy/functional/frontends/torch/nn/functional/pooling_functions.py\n--- a/ivy/functional/frontends/torch/nn/functional/pooling_functions.py\n+++ b/ivy/functional/frontends/torch/nn/functional/pooling_functions.py\n@@ -274,3 +274,28 @@\n dilation=dilation,\n ceil_mode=ceil_mode,\n )\n+\n+\n+@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\n+@to_ivy_arrays_and_back\n+def max_pool3d(\n+ input,\n+ kernel_size,\n+ stride=None,\n+ padding=0,\n+ dilation=1,\n+ ceil_mode=False,\n+ return_indices=False,\n+):\n+ if stride is None:\n+ stride = kernel_size\n+\n+ return ivy.max_pool3d(\n+ input,\n+ kernel_size,\n+ stride,\n+ padding,\n+ data_format=\"NCDHW\",\n+ dilation=dilation,\n+ ceil_mode=ceil_mode,\n+ )\n", "issue": "max_pool3d\n\n", "before_files": [{"content": "# global\nfrom functools import reduce\n\n# local\nimport ivy\nfrom ivy import with_unsupported_dtypes\nfrom ivy.functional.frontends.torch.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n# --- Helpers --- #\n# --------------- #\n\n\ndef _broadcast_pooling_helper(x, pool_dims: str = \"2d\", name: str = \"padding\"):\n dims = {\"1d\": 1, \"2d\": 2, \"3d\": 3}\n\n if isinstance(x, int):\n return tuple([x for _ in range(dims[pool_dims])])\n\n if len(x) == 1:\n return tuple([x[0] for _ in range(dims[pool_dims])])\n elif len(x) == dims[pool_dims]:\n return tuple(x)\n elif len(x) != dims[pool_dims]:\n raise ValueError(\n f\"`{name}` must either be a single int, \"\n f\"or a tuple of {dims[pool_dims]} ints. \"\n )\n\n\n# --- Main --- #\n# ------------ #\n\n\n@with_unsupported_dtypes(\n {\n \"2.0.1 and below\": (\n \"bfloat16\",\n \"float16\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef adaptive_avg_pool1d(input, output_size):\n return ivy.adaptive_avg_pool1d(input, output_size)\n\n\n@with_unsupported_dtypes(\n {\n \"2.0.1 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef adaptive_avg_pool2d(input, output_size):\n return ivy.adaptive_avg_pool2d(input, output_size)\n\n\n@to_ivy_arrays_and_back\ndef adaptive_max_pool2d(\n input,\n output_size,\n return_indices=False,\n):\n # ToDo: Add return_indices once superset is implemented\n return ivy.adaptive_max_pool2d(input, output_size)\n\n\n@to_ivy_arrays_and_back\ndef avg_pool1d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n count_include_pad=True,\n):\n if stride is None:\n stride = kernel_size\n data_format = \"NCW\"\n # TODO: remove the broadcasting and padding string specification when ivy.avg_pool\n # support explicit padding\n kernel_size = _broadcast_pooling_helper(kernel_size, \"1d\", name=\"kernel_size\")\n padding = _broadcast_pooling_helper(padding, \"1d\", name=\"padding\")\n if all(\n [pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)]\n ):\n padding = \"SAME\"\n else:\n padding = \"VALID\"\n return ivy.avg_pool1d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n count_include_pad=count_include_pad,\n ceil_mode=ceil_mode,\n )\n\n\n@to_ivy_arrays_and_back\ndef avg_pool2d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n count_include_pad=True,\n divisor_override=None,\n):\n if stride is None:\n stride = kernel_size\n data_format = \"NCHW\"\n # TODO: remove the broadcasting and padding string specification when ivy.avg_pool\n # support explicit padding\n kernel_size = 
_broadcast_pooling_helper(kernel_size, \"2d\", name=\"kernel_size\")\n padding = _broadcast_pooling_helper(padding, \"2d\", name=\"padding\")\n if all(\n [pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)]\n ):\n padding = \"SAME\"\n else:\n padding = \"VALID\"\n return ivy.avg_pool2d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n ceil_mode=ceil_mode,\n count_include_pad=count_include_pad,\n divisor_override=divisor_override,\n )\n\n\n@to_ivy_arrays_and_back\ndef avg_pool3d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n count_include_pad=True,\n divisor_override=None,\n):\n if stride is None:\n stride = kernel_size\n # TODO: remove the broadcasting and padding string specification when ivy.avg_pool\n # support explicit padding\n kernel_size = _broadcast_pooling_helper(kernel_size, \"3d\", name=\"kernel_size\")\n padding = _broadcast_pooling_helper(padding, \"3d\", name=\"padding\")\n if all(\n [pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)]\n ):\n padding = \"SAME\"\n else:\n padding = \"VALID\"\n return ivy.avg_pool3d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=\"NCDHW\",\n ceil_mode=ceil_mode,\n count_include_pad=count_include_pad,\n divisor_override=divisor_override,\n )\n\n\n@with_unsupported_dtypes(\n {\n \"2.0.1 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef lp_pool1d(input, norm_type, kernel_size, stride=None, ceil_mode=False):\n data_format = \"NCW\"\n padding = \"VALID\"\n if stride is None:\n stride = kernel_size\n if not isinstance(kernel_size, int):\n kernel_mul = reduce(lambda x, y: x * y, kernel_size)\n else:\n kernel_mul = kernel_size\n\n out = ivy.avg_pool1d(\n ivy.pow(input, norm_type),\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n ceil_mode=ceil_mode,\n )\n p = 1.0 / norm_type if norm_type != 0 else 1.0\n return ivy.pow(ivy.multiply(out, kernel_mul), p)\n\n\n@to_ivy_arrays_and_back\ndef lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):\n data_format = \"NCHW\"\n padding = \"VALID\"\n if stride is None:\n stride = kernel_size\n out = ivy.avg_pool2d(\n ivy.pow(input, norm_type),\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n ceil_mode=ceil_mode,\n )\n if not isinstance(kernel_size, int):\n kernel_mul = reduce(lambda x, y: x * y, kernel_size)\n else:\n kernel_mul = kernel_size\n p = ivy.divide(1.0, norm_type) if norm_type != 0 else 1.0\n return ivy.pow(ivy.multiply(out, kernel_mul), p).astype(input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef max_pool1d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n dilation=1,\n return_indices=False,\n):\n if stride is None:\n stride = kernel_size\n data_format = \"NCW\"\n return ivy.max_pool1d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n dilation=dilation,\n ceil_mode=ceil_mode,\n )\n\n\n@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef max_pool2d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n dilation=1,\n ceil_mode=False,\n return_indices=False,\n):\n if stride is None:\n stride = kernel_size\n return ivy.max_pool2d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=\"NCHW\",\n dilation=dilation,\n ceil_mode=ceil_mode,\n )\n", "path": "ivy/functional/frontends/torch/nn/functional/pooling_functions.py"}], "after_files": [{"content": "# global\nfrom functools import 
reduce\n\n# local\nimport ivy\nfrom ivy import with_unsupported_dtypes\nfrom ivy.functional.frontends.torch.func_wrapper import (\n to_ivy_arrays_and_back,\n)\n\n\n# --- Helpers --- #\n# --------------- #\n\n\ndef _broadcast_pooling_helper(x, pool_dims: str = \"2d\", name: str = \"padding\"):\n dims = {\"1d\": 1, \"2d\": 2, \"3d\": 3}\n\n if isinstance(x, int):\n return tuple([x for _ in range(dims[pool_dims])])\n\n if len(x) == 1:\n return tuple([x[0] for _ in range(dims[pool_dims])])\n elif len(x) == dims[pool_dims]:\n return tuple(x)\n elif len(x) != dims[pool_dims]:\n raise ValueError(\n f\"`{name}` must either be a single int, \"\n f\"or a tuple of {dims[pool_dims]} ints. \"\n )\n\n\n# --- Main --- #\n# ------------ #\n\n\n@with_unsupported_dtypes(\n {\n \"2.0.1 and below\": (\n \"bfloat16\",\n \"float16\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef adaptive_avg_pool1d(input, output_size):\n return ivy.adaptive_avg_pool1d(input, output_size)\n\n\n@with_unsupported_dtypes(\n {\n \"2.0.1 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef adaptive_avg_pool2d(input, output_size):\n return ivy.adaptive_avg_pool2d(input, output_size)\n\n\n@to_ivy_arrays_and_back\ndef adaptive_max_pool2d(\n input,\n output_size,\n return_indices=False,\n):\n # ToDo: Add return_indices once superset is implemented\n return ivy.adaptive_max_pool2d(input, output_size)\n\n\n@to_ivy_arrays_and_back\ndef avg_pool1d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n count_include_pad=True,\n):\n if stride is None:\n stride = kernel_size\n data_format = \"NCW\"\n # TODO: remove the broadcasting and padding string specification when ivy.avg_pool\n # support explicit padding\n kernel_size = _broadcast_pooling_helper(kernel_size, \"1d\", name=\"kernel_size\")\n padding = _broadcast_pooling_helper(padding, \"1d\", name=\"padding\")\n if all(\n [pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)]\n ):\n padding = \"SAME\"\n else:\n padding = \"VALID\"\n return ivy.avg_pool1d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n count_include_pad=count_include_pad,\n ceil_mode=ceil_mode,\n )\n\n\n@to_ivy_arrays_and_back\ndef avg_pool2d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n count_include_pad=True,\n divisor_override=None,\n):\n if stride is None:\n stride = kernel_size\n data_format = \"NCHW\"\n # TODO: remove the broadcasting and padding string specification when ivy.avg_pool\n # support explicit padding\n kernel_size = _broadcast_pooling_helper(kernel_size, \"2d\", name=\"kernel_size\")\n padding = _broadcast_pooling_helper(padding, \"2d\", name=\"padding\")\n if all(\n [pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)]\n ):\n padding = \"SAME\"\n else:\n padding = \"VALID\"\n return ivy.avg_pool2d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n ceil_mode=ceil_mode,\n count_include_pad=count_include_pad,\n divisor_override=divisor_override,\n )\n\n\n@to_ivy_arrays_and_back\ndef avg_pool3d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n count_include_pad=True,\n divisor_override=None,\n):\n if stride is None:\n stride = kernel_size\n # TODO: remove the broadcasting and padding string specification when ivy.avg_pool\n # support explicit padding\n kernel_size = _broadcast_pooling_helper(kernel_size, \"3d\", name=\"kernel_size\")\n padding = _broadcast_pooling_helper(padding, 
\"3d\", name=\"padding\")\n if all(\n [pad == ivy.ceil((kernel - 1) / 2) for kernel, pad in zip(kernel_size, padding)]\n ):\n padding = \"SAME\"\n else:\n padding = \"VALID\"\n return ivy.avg_pool3d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=\"NCDHW\",\n ceil_mode=ceil_mode,\n count_include_pad=count_include_pad,\n divisor_override=divisor_override,\n )\n\n\n@with_unsupported_dtypes(\n {\n \"2.0.1 and below\": (\n \"float16\",\n \"bfloat16\",\n )\n },\n \"torch\",\n)\n@to_ivy_arrays_and_back\ndef lp_pool1d(input, norm_type, kernel_size, stride=None, ceil_mode=False):\n data_format = \"NCW\"\n padding = \"VALID\"\n if stride is None:\n stride = kernel_size\n if not isinstance(kernel_size, int):\n kernel_mul = reduce(lambda x, y: x * y, kernel_size)\n else:\n kernel_mul = kernel_size\n\n out = ivy.avg_pool1d(\n ivy.pow(input, norm_type),\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n ceil_mode=ceil_mode,\n )\n p = 1.0 / norm_type if norm_type != 0 else 1.0\n return ivy.pow(ivy.multiply(out, kernel_mul), p)\n\n\n@to_ivy_arrays_and_back\ndef lp_pool2d(input, norm_type, kernel_size, stride=None, ceil_mode=False):\n data_format = \"NCHW\"\n padding = \"VALID\"\n if stride is None:\n stride = kernel_size\n out = ivy.avg_pool2d(\n ivy.pow(input, norm_type),\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n ceil_mode=ceil_mode,\n )\n if not isinstance(kernel_size, int):\n kernel_mul = reduce(lambda x, y: x * y, kernel_size)\n else:\n kernel_mul = kernel_size\n p = ivy.divide(1.0, norm_type) if norm_type != 0 else 1.0\n return ivy.pow(ivy.multiply(out, kernel_mul), p).astype(input.dtype)\n\n\n@to_ivy_arrays_and_back\ndef max_pool1d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n ceil_mode=False,\n dilation=1,\n return_indices=False,\n):\n if stride is None:\n stride = kernel_size\n data_format = \"NCW\"\n return ivy.max_pool1d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=data_format,\n dilation=dilation,\n ceil_mode=ceil_mode,\n )\n\n\n@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef max_pool2d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n dilation=1,\n ceil_mode=False,\n return_indices=False,\n):\n if stride is None:\n stride = kernel_size\n return ivy.max_pool2d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=\"NCHW\",\n dilation=dilation,\n ceil_mode=ceil_mode,\n )\n\n\n@with_unsupported_dtypes({\"2.0.1 and below\": (\"float16\",)}, \"torch\")\n@to_ivy_arrays_and_back\ndef max_pool3d(\n input,\n kernel_size,\n stride=None,\n padding=0,\n dilation=1,\n ceil_mode=False,\n return_indices=False,\n):\n if stride is None:\n stride = kernel_size\n\n return ivy.max_pool3d(\n input,\n kernel_size,\n stride,\n padding,\n data_format=\"NCDHW\",\n dilation=dilation,\n ceil_mode=ceil_mode,\n )\n", "path": "ivy/functional/frontends/torch/nn/functional/pooling_functions.py"}]} | 2,766 | 260 |
gh_patches_debug_11674 | rasdani/github-patches | git_diff | sopel-irc__sopel-1270 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[wikipedia] Error on nonexistent article link
I productively (? :laughing:) found another issue in the `wikipedia` module while testing my fix for #1255. It turns out that, if someone sends a link to a Wikipedia article that doesn't exist, the bot spits out a nice `KeyError`. Better to spit out a clean error (or, perhaps, nothing).
As with #1255, I'm testing a fix for this on my own Sopel instance. Once I'm happy with it (and the wording of whatever error message I add), there will be a PR for feedback before merging.
--- END ISSUE ---
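The guard the issue is asking for is narrow: wrap the snippet lookup so a page with no extract produces a readable message instead of a traceback. A minimal sketch of the patched `say_snippet` helper, matching the diff reproduced further down in this entry (`mw_snippet` and `bot.say` are the module's existing helpers, shown in the file listing below):

```python
def say_snippet(bot, server, query, show_url=True):
    page_name = query.replace('_', ' ')
    query = query.replace(' ', '_')
    try:
        snippet = mw_snippet(server, query)
    except KeyError:
        # Nonexistent articles have no extract in the API response.
        if show_url:
            bot.say("[WIKIPEDIA] Error fetching snippet for \"{}\".".format(page_name))
        return
    msg = '[WIKIPEDIA] {} | "{}"'.format(page_name, snippet)
    if show_url:
        msg = msg + ' | https://{}/wiki/{}'.format(server, query)
    bot.say(msg)
```

Only the `try`/`except KeyError` block and the early `return` are new; the rest is unchanged from the current module.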
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sopel/modules/wikipedia.py`
Content:
```
1 # coding=utf-8
2 # Copyright 2013 Elsie Powell - embolalia.com
3 # Licensed under the Eiffel Forum License 2.
4 from __future__ import unicode_literals, absolute_import, print_function, division
5 from sopel import web, tools
6 from sopel.config.types import StaticSection, ValidatedAttribute
7 from sopel.module import NOLIMIT, commands, example, rule
8 import json
9 import re
10
11 import sys
12 if sys.version_info.major < 3:
13 from urlparse import unquote as _unquote
14 unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8')
15 else:
16 from urllib.parse import unquote
17
18 REDIRECT = re.compile(r'^REDIRECT (.*)')
19
20
21 class WikipediaSection(StaticSection):
22 default_lang = ValidatedAttribute('default_lang', default='en')
23 """The default language to find articles from."""
24 lang_per_channel = ValidatedAttribute('lang_per_channel')
25
26
27 def setup(bot):
28 bot.config.define_section('wikipedia', WikipediaSection)
29
30 regex = re.compile('([a-z]+).(wikipedia.org/wiki/)([^ ]+)')
31 if not bot.memory.contains('url_callbacks'):
32 bot.memory['url_callbacks'] = tools.SopelMemory()
33 bot.memory['url_callbacks'][regex] = mw_info
34
35
36 def configure(config):
37 config.define_section('wikipedia', WikipediaSection)
38 config.wikipedia.configure_setting(
39 'default_lang',
40 "Enter the default language to find articles from."
41 )
42
43
44 def mw_search(server, query, num):
45 """
46 Searches the specified MediaWiki server for the given query, and returns
47 the specified number of results.
48 """
49 search_url = ('http://%s/w/api.php?format=json&action=query'
50 '&list=search&srlimit=%d&srprop=timestamp&srwhat=text'
51 '&srsearch=') % (server, num)
52 search_url += query
53 query = json.loads(web.get(search_url))
54 if 'query' in query:
55 query = query['query']['search']
56 return [r['title'] for r in query]
57 else:
58 return None
59
60
61 def say_snippet(bot, server, query, show_url=True):
62 page_name = query.replace('_', ' ')
63 query = query.replace(' ', '_')
64 snippet = mw_snippet(server, query)
65 msg = '[WIKIPEDIA] {} | "{}"'.format(page_name, snippet)
66 if show_url:
67 msg = msg + ' | https://{}/wiki/{}'.format(server, query)
68 bot.say(msg)
69
70
71 def mw_snippet(server, query):
72 """
73 Retrives a snippet of the specified length from the given page on the given
74 server.
75 """
76 snippet_url = ('https://' + server + '/w/api.php?format=json'
77 '&action=query&prop=extracts&exintro&explaintext'
78 '&exchars=300&redirects&titles=')
79 snippet_url += query
80 snippet = json.loads(web.get(snippet_url))
81 snippet = snippet['query']['pages']
82
83 # For some reason, the API gives the page *number* as the key, so we just
84 # grab the first page number in the results.
85 snippet = snippet[list(snippet.keys())[0]]
86
87 return snippet['extract']
88
89
90 @rule('.*\/([a-z]+\.wikipedia.org)\/wiki\/((?!File\:)[^ ]+).*')
91 def mw_info(bot, trigger, found_match=None):
92 """
93 Retrives a snippet of the specified length from the given page on the given
94 server.
95 """
96 match = found_match or trigger
97 say_snippet(bot, match.group(1), unquote(match.group(2)), show_url=False)
98
99
100 @commands('w', 'wiki', 'wik')
101 @example('.w San Francisco')
102 def wikipedia(bot, trigger):
103 lang = bot.config.wikipedia.default_lang
104
105 #change lang if channel has custom language set
106 if (trigger.sender and not trigger.sender.is_nick() and
107 bot.config.wikipedia.lang_per_channel):
108 customlang = re.search('(' + trigger.sender + '):(\w+)',
109 bot.config.wikipedia.lang_per_channel)
110 if customlang is not None:
111 lang = customlang.group(2)
112
113 if trigger.group(2) is None:
114 bot.reply("What do you want me to look up?")
115 return NOLIMIT
116
117 query = trigger.group(2)
118 args = re.search(r'^-([a-z]{2,12})\s(.*)', query)
119 if args is not None:
120 lang = args.group(1)
121 query = args.group(2)
122
123 if not query:
124 bot.reply('What do you want me to look up?')
125 return NOLIMIT
126 server = lang + '.wikipedia.org'
127 query = mw_search(server, query, 1)
128 if not query:
129 bot.reply("I can't find any results for that.")
130 return NOLIMIT
131 else:
132 query = query[0]
133 say_snippet(bot, server, query)
134
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/sopel/modules/wikipedia.py b/sopel/modules/wikipedia.py
--- a/sopel/modules/wikipedia.py
+++ b/sopel/modules/wikipedia.py
@@ -61,7 +61,12 @@
def say_snippet(bot, server, query, show_url=True):
page_name = query.replace('_', ' ')
query = query.replace(' ', '_')
- snippet = mw_snippet(server, query)
+ try:
+ snippet = mw_snippet(server, query)
+ except KeyError:
+ if show_url:
+ bot.say("[WIKIPEDIA] Error fetching snippet for \"{}\".".format(page_name))
+ return
msg = '[WIKIPEDIA] {} | "{}"'.format(page_name, snippet)
if show_url:
msg = msg + ' | https://{}/wiki/{}'.format(server, query)
| {"golden_diff": "diff --git a/sopel/modules/wikipedia.py b/sopel/modules/wikipedia.py\n--- a/sopel/modules/wikipedia.py\n+++ b/sopel/modules/wikipedia.py\n@@ -61,7 +61,12 @@\n def say_snippet(bot, server, query, show_url=True):\n page_name = query.replace('_', ' ')\n query = query.replace(' ', '_')\n- snippet = mw_snippet(server, query)\n+ try:\n+ snippet = mw_snippet(server, query)\n+ except KeyError:\n+ if show_url:\n+ bot.say(\"[WIKIPEDIA] Error fetching snippet for \\\"{}\\\".\".format(page_name))\n+ return\n msg = '[WIKIPEDIA] {} | \"{}\"'.format(page_name, snippet)\n if show_url:\n msg = msg + ' | https://{}/wiki/{}'.format(server, query)\n", "issue": "[wikipedia] Error on nonexistent article link\nI productively (? :laughing:) found another issue in the `wikipedia` module while testing my fix for #1255. It turns out that, if someone sends a link to a Wikipedia article that doesn't exist, the bot spits out a nice `KeyError`. Better to spit out a clean error (or, perhaps, nothing).\r\n\r\nAs with #1255, I'm testing a fix for this on my own Sopel instance. Once I'm happy with it (and the wording of whatever error message I add), there will be a PR for feedback before merging.\n", "before_files": [{"content": "# coding=utf-8\n# Copyright 2013 Elsie Powell - embolalia.com\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\nfrom sopel import web, tools\nfrom sopel.config.types import StaticSection, ValidatedAttribute\nfrom sopel.module import NOLIMIT, commands, example, rule\nimport json\nimport re\n\nimport sys\nif sys.version_info.major < 3:\n from urlparse import unquote as _unquote\n unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8')\nelse:\n from urllib.parse import unquote\n\nREDIRECT = re.compile(r'^REDIRECT (.*)')\n\n\nclass WikipediaSection(StaticSection):\n default_lang = ValidatedAttribute('default_lang', default='en')\n \"\"\"The default language to find articles from.\"\"\"\n lang_per_channel = ValidatedAttribute('lang_per_channel')\n\n\ndef setup(bot):\n bot.config.define_section('wikipedia', WikipediaSection)\n\n regex = re.compile('([a-z]+).(wikipedia.org/wiki/)([^ ]+)')\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n bot.memory['url_callbacks'][regex] = mw_info\n\n\ndef configure(config):\n config.define_section('wikipedia', WikipediaSection)\n config.wikipedia.configure_setting(\n 'default_lang',\n \"Enter the default language to find articles from.\"\n )\n\n\ndef mw_search(server, query, num):\n \"\"\"\n Searches the specified MediaWiki server for the given query, and returns\n the specified number of results.\n \"\"\"\n search_url = ('http://%s/w/api.php?format=json&action=query'\n '&list=search&srlimit=%d&srprop=timestamp&srwhat=text'\n '&srsearch=') % (server, num)\n search_url += query\n query = json.loads(web.get(search_url))\n if 'query' in query:\n query = query['query']['search']\n return [r['title'] for r in query]\n else:\n return None\n\n\ndef say_snippet(bot, server, query, show_url=True):\n page_name = query.replace('_', ' ')\n query = query.replace(' ', '_')\n snippet = mw_snippet(server, query)\n msg = '[WIKIPEDIA] {} | \"{}\"'.format(page_name, snippet)\n if show_url:\n msg = msg + ' | https://{}/wiki/{}'.format(server, query)\n bot.say(msg)\n\n\ndef mw_snippet(server, query):\n \"\"\"\n Retrives a snippet of the specified length from the given page on the given\n server.\n \"\"\"\n snippet_url = ('https://' + 
server + '/w/api.php?format=json'\n '&action=query&prop=extracts&exintro&explaintext'\n '&exchars=300&redirects&titles=')\n snippet_url += query\n snippet = json.loads(web.get(snippet_url))\n snippet = snippet['query']['pages']\n\n # For some reason, the API gives the page *number* as the key, so we just\n # grab the first page number in the results.\n snippet = snippet[list(snippet.keys())[0]]\n\n return snippet['extract']\n\n\n@rule('.*\\/([a-z]+\\.wikipedia.org)\\/wiki\\/((?!File\\:)[^ ]+).*')\ndef mw_info(bot, trigger, found_match=None):\n \"\"\"\n Retrives a snippet of the specified length from the given page on the given\n server.\n \"\"\"\n match = found_match or trigger\n say_snippet(bot, match.group(1), unquote(match.group(2)), show_url=False)\n\n\n@commands('w', 'wiki', 'wik')\n@example('.w San Francisco')\ndef wikipedia(bot, trigger):\n lang = bot.config.wikipedia.default_lang\n\n #change lang if channel has custom language set\n if (trigger.sender and not trigger.sender.is_nick() and\n bot.config.wikipedia.lang_per_channel):\n customlang = re.search('(' + trigger.sender + '):(\\w+)',\n bot.config.wikipedia.lang_per_channel)\n if customlang is not None:\n lang = customlang.group(2)\n\n if trigger.group(2) is None:\n bot.reply(\"What do you want me to look up?\")\n return NOLIMIT\n\n query = trigger.group(2)\n args = re.search(r'^-([a-z]{2,12})\\s(.*)', query)\n if args is not None:\n lang = args.group(1)\n query = args.group(2)\n\n if not query:\n bot.reply('What do you want me to look up?')\n return NOLIMIT\n server = lang + '.wikipedia.org'\n query = mw_search(server, query, 1)\n if not query:\n bot.reply(\"I can't find any results for that.\")\n return NOLIMIT\n else:\n query = query[0]\n say_snippet(bot, server, query)\n", "path": "sopel/modules/wikipedia.py"}], "after_files": [{"content": "# coding=utf-8\n# Copyright 2013 Elsie Powell - embolalia.com\n# Licensed under the Eiffel Forum License 2.\nfrom __future__ import unicode_literals, absolute_import, print_function, division\nfrom sopel import web, tools\nfrom sopel.config.types import StaticSection, ValidatedAttribute\nfrom sopel.module import NOLIMIT, commands, example, rule\nimport json\nimport re\n\nimport sys\nif sys.version_info.major < 3:\n from urlparse import unquote as _unquote\n unquote = lambda s: _unquote(s.encode('utf-8')).decode('utf-8')\nelse:\n from urllib.parse import unquote\n\nREDIRECT = re.compile(r'^REDIRECT (.*)')\n\n\nclass WikipediaSection(StaticSection):\n default_lang = ValidatedAttribute('default_lang', default='en')\n \"\"\"The default language to find articles from.\"\"\"\n lang_per_channel = ValidatedAttribute('lang_per_channel')\n\n\ndef setup(bot):\n bot.config.define_section('wikipedia', WikipediaSection)\n\n regex = re.compile('([a-z]+).(wikipedia.org/wiki/)([^ ]+)')\n if not bot.memory.contains('url_callbacks'):\n bot.memory['url_callbacks'] = tools.SopelMemory()\n bot.memory['url_callbacks'][regex] = mw_info\n\n\ndef configure(config):\n config.define_section('wikipedia', WikipediaSection)\n config.wikipedia.configure_setting(\n 'default_lang',\n \"Enter the default language to find articles from.\"\n )\n\n\ndef mw_search(server, query, num):\n \"\"\"\n Searches the specified MediaWiki server for the given query, and returns\n the specified number of results.\n \"\"\"\n search_url = ('http://%s/w/api.php?format=json&action=query'\n '&list=search&srlimit=%d&srprop=timestamp&srwhat=text'\n '&srsearch=') % (server, num)\n search_url += query\n query = json.loads(web.get(search_url))\n if 
'query' in query:\n query = query['query']['search']\n return [r['title'] for r in query]\n else:\n return None\n\n\ndef say_snippet(bot, server, query, show_url=True):\n page_name = query.replace('_', ' ')\n query = query.replace(' ', '_')\n try:\n snippet = mw_snippet(server, query)\n except KeyError:\n if show_url:\n bot.say(\"[WIKIPEDIA] Error fetching snippet for \\\"{}\\\".\".format(page_name))\n return\n msg = '[WIKIPEDIA] {} | \"{}\"'.format(page_name, snippet)\n if show_url:\n msg = msg + ' | https://{}/wiki/{}'.format(server, query)\n bot.say(msg)\n\n\ndef mw_snippet(server, query):\n \"\"\"\n Retrives a snippet of the specified length from the given page on the given\n server.\n \"\"\"\n snippet_url = ('https://' + server + '/w/api.php?format=json'\n '&action=query&prop=extracts&exintro&explaintext'\n '&exchars=300&redirects&titles=')\n snippet_url += query\n snippet = json.loads(web.get(snippet_url))\n snippet = snippet['query']['pages']\n\n # For some reason, the API gives the page *number* as the key, so we just\n # grab the first page number in the results.\n snippet = snippet[list(snippet.keys())[0]]\n\n return snippet['extract']\n\n\n@rule('.*\\/([a-z]+\\.wikipedia.org)\\/wiki\\/((?!File\\:)[^ ]+).*')\ndef mw_info(bot, trigger, found_match=None):\n \"\"\"\n Retrives a snippet of the specified length from the given page on the given\n server.\n \"\"\"\n match = found_match or trigger\n say_snippet(bot, match.group(1), unquote(match.group(2)), show_url=False)\n\n\n@commands('w', 'wiki', 'wik')\n@example('.w San Francisco')\ndef wikipedia(bot, trigger):\n lang = bot.config.wikipedia.default_lang\n\n #change lang if channel has custom language set\n if (trigger.sender and not trigger.sender.is_nick() and\n bot.config.wikipedia.lang_per_channel):\n customlang = re.search('(' + trigger.sender + '):(\\w+)',\n bot.config.wikipedia.lang_per_channel)\n if customlang is not None:\n lang = customlang.group(2)\n\n if trigger.group(2) is None:\n bot.reply(\"What do you want me to look up?\")\n return NOLIMIT\n\n query = trigger.group(2)\n args = re.search(r'^-([a-z]{2,12})\\s(.*)', query)\n if args is not None:\n lang = args.group(1)\n query = args.group(2)\n\n if not query:\n bot.reply('What do you want me to look up?')\n return NOLIMIT\n server = lang + '.wikipedia.org'\n query = mw_search(server, query, 1)\n if not query:\n bot.reply(\"I can't find any results for that.\")\n return NOLIMIT\n else:\n query = query[0]\n say_snippet(bot, server, query)\n", "path": "sopel/modules/wikipedia.py"}]} | 1,792 | 191 |
gh_patches_debug_39425 | rasdani/github-patches | git_diff | microsoft__DeepSpeed-3682 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] "Setting ds_accelerator" uses `print` not `logger`
**Describe the bug**
When `accelerate` tries to run any command while `DeepSpeed` is available, we get `"Setting ds_accelerator ..."`, which, while not explicitly bad, is making a test fail due to a non-clean CLI print when non-deepspeed code is used (we have a check for `is_deepspeed_available()` which is probably triggering this).
As a nice QOL improvement, it would be good to have `get_accelerator` (https://github.com/microsoft/DeepSpeed/blob/master/accelerator/real_accelerator.py#L102) and `set_accelerator` (https://github.com/microsoft/DeepSpeed/blob/master/accelerator/real_accelerator.py#L109) use the already-existing logger in the framework if possible, so that we can disable these annoying prints when they're not needed :)
**To Reproduce**
Steps to reproduce the behavior:
1. `pip install accelerate deepspeed -U`
2. Create a file with:
```python
from accelerate.commands.tpu import tpu_command_parser, tpu_command_launcher
parser = tpu_command_parser()
args = parser.parse_args([
"--config_file", "tests/test_configs/latest.yaml",
"--install_accelerate",
"--debug"
])
tpu_command_launcher(args)
```
3. Run `python {my_file_name.py}`
4. Should print:
```bash
Setting ds_accelerator to cuda (auto detect)
Running gcloud compute tpus tpu-vm ssh test-tpu --zone us-central1-a --command cd /usr/share; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all
```
**Expected behavior**
A configurable option to silence these print statements by having them run through the logging system instead, perhaps as an `info` or as a `debug`.
--- END ISSUE ---
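The change being requested amounts to routing that message through a logger so downstream tools can filter it by level instead of scraping stdout. A minimal sketch using the standard-library `logging` module — the logger name and the `_log_accelerator_choice` helper are illustrative placeholders, not the actual patch, and the framework's already-existing logger could be reused instead:

```python
import logging

# Placeholder logger; DeepSpeed's existing logger could be substituted here.
logger = logging.getLogger("DeepSpeed")


def _log_accelerator_choice(name, method):
    # INFO level rather than a bare print(), so callers can silence it
    # with a level filter or a logging.disable() call.
    logger.info("Setting ds_accelerator to %s (%s)", name, method)
```

With something like this in place, `get_accelerator()` and `set_accelerator()` would call `_log_accelerator_choice(ds_accelerator._name, ds_set_method)` and `_log_accelerator_choice(accel_obj._name, "model specified")` where they currently call `print`, and a consumer such as `accelerate` could raise the level to `WARNING` to keep its CLI output clean.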
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `accelerator/real_accelerator.py`
Content:
```
1 # Copyright (c) Microsoft Corporation.
2 # SPDX-License-Identifier: Apache-2.0
3
4 # DeepSpeed Team
5 import os
6
7 try:
8 from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1
9 except ImportError as e:
10 dsa1 = None
11 try:
12 from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa2
13 except ImportError as e:
14 dsa2 = None
15
16 ds_accelerator = None
17
18
19 def _validate_accelerator(accel_obj):
20 # because abstract_accelerator has different path during
21 # build time (accelerator.abstract_accelerator)
22 # and run time (deepspeed.accelerator.abstract_accelerator)
23 # and extension would import the
24 # run time abstract_accelerator/DeepSpeedAccelerator as its base
25 # class, so we need to compare accel_obj with both base class.
26 # if accel_obj is instance of DeepSpeedAccelerator in one of
27 # accelerator.abstractor_accelerator
28 # or deepspeed.accelerator.abstract_accelerator, consider accel_obj
29 # is a conforming object
30 if not ((dsa1 != None and isinstance(accel_obj, dsa1)) or (dsa2 != None and isinstance(accel_obj, dsa2))):
31 raise AssertionError(f'{accel_obj.__class__.__name__} accelerator is not subclass of DeepSpeedAccelerator')
32
33 # TODO: turn off is_available test since this breaks tests
34 #assert accel_obj.is_available(), \
35 # f'{accel_obj.__class__.__name__} accelerator fails is_available() test'
36
37
38 def get_accelerator():
39 global ds_accelerator
40 if ds_accelerator is not None:
41 return ds_accelerator
42
43 accelerator_name = None
44 ds_set_method = None
45 # 1. Detect whether there is override of DeepSpeed accelerators from environment variable.
46 # DS_ACCELERATOR = 'cuda'|'xpu'|'cpu'
47 if 'DS_ACCELERATOR' in os.environ.keys():
48 accelerator_name = os.environ['DS_ACCELERATOR']
49 if accelerator_name == 'xpu':
50 try:
51 from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401
52 except ImportError as e:
53 raise ValueError(
54 f'XPU_Accelerator requires intel_extension_for_deepspeed, which is not installed on this system.')
55 elif accelerator_name == 'cpu':
56 try:
57 import intel_extension_for_pytorch # noqa: F401
58 except ImportError as e:
59 raise ValueError(
60 f'CPU_Accelerator requires intel_extension_for_pytorch, which is not installed on this system.')
61 elif accelerator_name == 'cuda':
62 pass
63 else:
64 raise ValueError(
65 f'DS_ACCELERATOR must be one of "cuda", "cpu", or "xpu". Value "{accelerator_name}" is not supported')
66 ds_set_method = 'override'
67
68 # 2. If no override, detect which accelerator to use automatically
69 if accelerator_name == None:
70 try:
71 from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401,F811
72 accelerator_name = 'xpu'
73 except ImportError as e:
74 # We need a way to choose between CUDA_Accelerator and CPU_Accelerator
75 # Currently we detect whether intel_extension_for_pytorch is installed
76 # in the environment and use CPU_Accelerator if the answer is True.
77 # An alternative might be detect whether CUDA device is installed on
78 # the system but this comes with two pitfalls:
79 # 1. the system may not have torch pre-installed, so
80 # get_accelerator().is_available() may not work.
81 # 2. Some scenario like install on login node (without CUDA device)
82 # and run on compute node (with CUDA device) may cause mismatch
83 # between installation time and runtime.
84 try:
85 import intel_extension_for_pytorch # noqa: F401,F811
86 accelerator_name = 'cpu'
87 except ImportError as e:
88 accelerator_name = 'cuda'
89 ds_set_method = 'auto detect'
90
91 # 3. Set ds_accelerator accordingly
92 if accelerator_name == 'cuda':
93 from .cuda_accelerator import CUDA_Accelerator
94 ds_accelerator = CUDA_Accelerator()
95 elif accelerator_name == 'cpu':
96 from .cpu_accelerator import CPU_Accelerator
97 ds_accelerator = CPU_Accelerator()
98 elif accelerator_name == 'xpu':
99 # XPU_Accelerator is already imported in detection stage
100 ds_accelerator = XPU_Accelerator()
101 _validate_accelerator(ds_accelerator)
102 print(f"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})")
103 return ds_accelerator
104
105
106 def set_accelerator(accel_obj):
107 global ds_accelerator
108 _validate_accelerator(accel_obj)
109 print(f"Setting ds_accelerator to {accel_obj._name} (model specified)")
110 ds_accelerator = accel_obj
111
112
113 '''
114 -----------[code] test_get.py -----------
115 from deepspeed.accelerator import get_accelerator
116 my_accelerator = get_accelerator()
117 print(f'{my_accelerator._name=}')
118 print(f'{my_accelerator._communication_backend=}')
119 print(f'{my_accelerator.HalfTensor().device=}')
120 print(f'{my_accelerator.total_memory()=}')
121 -----------[code] test_get.py -----------
122
123 ---[output] python test_get.py---------
124 my_accelerator.name()='cuda'
125 my_accelerator.communication_backend='nccl'
126 my_accelerator.HalfTensor().device=device(type='cuda', index=0)
127 my_accelerator.total_memory()=34089730048
128 ---[output] python test_get.py---------
129
130 **************************************************************************
131 -----------[code] test_set.py -----------
132 from deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator
133 cu_accel = CUDA_Accelerator()
134 print(f'{id(cu_accel)=}')
135 from deepspeed.accelerator import set_accelerator, get_accelerator
136 set_accelerator(cu_accel)
137
138 my_accelerator = get_accelerator()
139 print(f'{id(my_accelerator)=}')
140 print(f'{my_accelerator._name=}')
141 print(f'{my_accelerator._communication_backend=}')
142 print(f'{my_accelerator.HalfTensor().device=}')
143 print(f'{my_accelerator.total_memory()=}')
144 -----------[code] test_set.py -----------
145
146
147 ---[output] python test_set.py---------
148 id(cu_accel)=139648165478304
149 my_accelerator=<deepspeed.accelerator.cuda_accelerator.CUDA_Accelerator object at 0x7f025f4bffa0>
150 my_accelerator.name='cuda'
151 my_accelerator.communication_backend='nccl'
152 my_accelerator.HalfTensor().device=device(type='cuda', index=0)
153 my_accelerator.total_memory()=34089730048
154 ---[output] python test_set.py---------
155 '''
156
```
--- END FILES ---
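For context, `get_accelerator()` above is driven either by the `DS_ACCELERATOR` environment variable or by auto-detection. A minimal, hypothetical sketch of forcing a backend (only `get_accelerator` and the "cuda"/"cpu"/"xpu" values come from the file above; the rest is illustrative):

```python
import os

# Hypothetical usage sketch: force the CUDA backend before DeepSpeed is imported.
# Valid DS_ACCELERATOR values, per real_accelerator.py above: "cuda", "cpu", "xpu".
os.environ["DS_ACCELERATOR"] = "cuda"

from deepspeed.accelerator import get_accelerator  # noqa: E402

accel = get_accelerator()   # with the file as shown, prints "Setting ds_accelerator to cuda (override)"
print(accel._name)          # attribute used in the docstring examples above
```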
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/accelerator/real_accelerator.py b/accelerator/real_accelerator.py
--- a/accelerator/real_accelerator.py
+++ b/accelerator/real_accelerator.py
@@ -4,6 +4,13 @@
# DeepSpeed Team
import os
+try:
+ # Importing logger currently requires that torch is installed, hence the try...except
+ # TODO: Remove logger dependency on torch.
+ from deepspeed.utils import logger as accel_logger
+except ImportError as e:
+ accel_logger = None
+
try:
from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1
except ImportError as e:
@@ -99,14 +106,16 @@
# XPU_Accelerator is already imported in detection stage
ds_accelerator = XPU_Accelerator()
_validate_accelerator(ds_accelerator)
- print(f"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})")
+ if accel_logger is not None:
+ accel_logger.info(f"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})")
return ds_accelerator
def set_accelerator(accel_obj):
global ds_accelerator
_validate_accelerator(accel_obj)
- print(f"Setting ds_accelerator to {accel_obj._name} (model specified)")
+ if accel_logger is not None:
+ accel_logger.info(f"Setting ds_accelerator to {accel_obj._name} (model specified)")
ds_accelerator = accel_obj
@@ -114,10 +123,10 @@
-----------[code] test_get.py -----------
from deepspeed.accelerator import get_accelerator
my_accelerator = get_accelerator()
-print(f'{my_accelerator._name=}')
-print(f'{my_accelerator._communication_backend=}')
-print(f'{my_accelerator.HalfTensor().device=}')
-print(f'{my_accelerator.total_memory()=}')
+logger.info(f'{my_accelerator._name=}')
+logger.info(f'{my_accelerator._communication_backend=}')
+logger.info(f'{my_accelerator.HalfTensor().device=}')
+logger.info(f'{my_accelerator.total_memory()=}')
-----------[code] test_get.py -----------
---[output] python test_get.py---------
@@ -131,16 +140,16 @@
-----------[code] test_set.py -----------
from deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator
cu_accel = CUDA_Accelerator()
-print(f'{id(cu_accel)=}')
+logger.info(f'{id(cu_accel)=}')
from deepspeed.accelerator import set_accelerator, get_accelerator
set_accelerator(cu_accel)
my_accelerator = get_accelerator()
-print(f'{id(my_accelerator)=}')
-print(f'{my_accelerator._name=}')
-print(f'{my_accelerator._communication_backend=}')
-print(f'{my_accelerator.HalfTensor().device=}')
-print(f'{my_accelerator.total_memory()=}')
+logger.info(f'{id(my_accelerator)=}')
+logger.info(f'{my_accelerator._name=}')
+logger.info(f'{my_accelerator._communication_backend=}')
+logger.info(f'{my_accelerator.HalfTensor().device=}')
+logger.info(f'{my_accelerator.total_memory()=}')
-----------[code] test_set.py -----------
| {"golden_diff": "diff --git a/accelerator/real_accelerator.py b/accelerator/real_accelerator.py\n--- a/accelerator/real_accelerator.py\n+++ b/accelerator/real_accelerator.py\n@@ -4,6 +4,13 @@\n # DeepSpeed Team\n import os\n \n+try:\n+ # Importing logger currently requires that torch is installed, hence the try...except\n+ # TODO: Remove logger dependency on torch.\n+ from deepspeed.utils import logger as accel_logger\n+except ImportError as e:\n+ accel_logger = None\n+\n try:\n from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1\n except ImportError as e:\n@@ -99,14 +106,16 @@\n # XPU_Accelerator is already imported in detection stage\n ds_accelerator = XPU_Accelerator()\n _validate_accelerator(ds_accelerator)\n- print(f\"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})\")\n+ if accel_logger is not None:\n+ accel_logger.info(f\"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})\")\n return ds_accelerator\n \n \n def set_accelerator(accel_obj):\n global ds_accelerator\n _validate_accelerator(accel_obj)\n- print(f\"Setting ds_accelerator to {accel_obj._name} (model specified)\")\n+ if accel_logger is not None:\n+ accel_logger.info(f\"Setting ds_accelerator to {accel_obj._name} (model specified)\")\n ds_accelerator = accel_obj\n \n \n@@ -114,10 +123,10 @@\n -----------[code] test_get.py -----------\n from deepspeed.accelerator import get_accelerator\n my_accelerator = get_accelerator()\n-print(f'{my_accelerator._name=}')\n-print(f'{my_accelerator._communication_backend=}')\n-print(f'{my_accelerator.HalfTensor().device=}')\n-print(f'{my_accelerator.total_memory()=}')\n+logger.info(f'{my_accelerator._name=}')\n+logger.info(f'{my_accelerator._communication_backend=}')\n+logger.info(f'{my_accelerator.HalfTensor().device=}')\n+logger.info(f'{my_accelerator.total_memory()=}')\n -----------[code] test_get.py -----------\n \n ---[output] python test_get.py---------\n@@ -131,16 +140,16 @@\n -----------[code] test_set.py -----------\n from deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator\n cu_accel = CUDA_Accelerator()\n-print(f'{id(cu_accel)=}')\n+logger.info(f'{id(cu_accel)=}')\n from deepspeed.accelerator import set_accelerator, get_accelerator\n set_accelerator(cu_accel)\n \n my_accelerator = get_accelerator()\n-print(f'{id(my_accelerator)=}')\n-print(f'{my_accelerator._name=}')\n-print(f'{my_accelerator._communication_backend=}')\n-print(f'{my_accelerator.HalfTensor().device=}')\n-print(f'{my_accelerator.total_memory()=}')\n+logger.info(f'{id(my_accelerator)=}')\n+logger.info(f'{my_accelerator._name=}')\n+logger.info(f'{my_accelerator._communication_backend=}')\n+logger.info(f'{my_accelerator.HalfTensor().device=}')\n+logger.info(f'{my_accelerator.total_memory()=}')\n -----------[code] test_set.py -----------\n", "issue": "[BUG] \"Setting ds_accelerator\" uses `print` not `logger`\n**Describe the bug**\r\nWhen `accelerate` tries to run any command while `DeepSpeed` is available, we get `\"Setting ds_accelerator ...\"`, which while not explicitly bad, is making a test fail due to a non-clean CLI print when non-deepspeed code is used (we have a check for `is_deepspeed_available()` which is probably triggering this.\r\n\r\nAs a nice QOL, it would be good to have `get_accelerator` (https://github.com/microsoft/DeepSpeed/blob/master/accelerator/real_accelerator.py#L102) and `set_accelerator` (https://github.com/microsoft/DeepSpeed/blob/master/accelerator/real_accelerator.py#L109) use the already-existing logger in the framework if 
possible, so that we can disable these annoying prints when they're not needed :)\r\n\r\n**To Reproduce**\r\nSteps to reproduce the behavior:\r\n1. `pip install accelerate deepspeed -U`\r\n2. Create a file with:\r\n```python\r\nfrom accelerate.commands.tpu import tpu_command_parser, tpu_command_launcher\r\n\r\nparser = tpu_command_parser()\r\nargs = parser.parse_args([\r\n \"--config_file\", \"tests/test_configs/latest.yaml\",\r\n \"--install_accelerate\",\r\n \"--debug\"\r\n])\r\n\r\ntpu_command_launcher(args)\r\n```\r\n3. Run `python {my_file_name.py}`\r\n4. Should print:\r\n```bash\r\nSetting ds_accelerator to cuda (auto detect)\r\nRunning gcloud compute tpus tpu-vm ssh test-tpu --zone us-central1-a --command cd /usr/share; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all\r\n```\r\n\r\n**Expected behavior**\r\n\r\nA configurable option to silence these print statements by having them run through the logging system instead, perhaps as an `info` or as a `debug`. \n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\nimport os\n\ntry:\n from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1\nexcept ImportError as e:\n dsa1 = None\ntry:\n from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa2\nexcept ImportError as e:\n dsa2 = None\n\nds_accelerator = None\n\n\ndef _validate_accelerator(accel_obj):\n # because abstract_accelerator has different path during\n # build time (accelerator.abstract_accelerator)\n # and run time (deepspeed.accelerator.abstract_accelerator)\n # and extension would import the\n # run time abstract_accelerator/DeepSpeedAccelerator as its base\n # class, so we need to compare accel_obj with both base class.\n # if accel_obj is instance of DeepSpeedAccelerator in one of\n # accelerator.abstractor_accelerator\n # or deepspeed.accelerator.abstract_accelerator, consider accel_obj\n # is a conforming object\n if not ((dsa1 != None and isinstance(accel_obj, dsa1)) or (dsa2 != None and isinstance(accel_obj, dsa2))):\n raise AssertionError(f'{accel_obj.__class__.__name__} accelerator is not subclass of DeepSpeedAccelerator')\n\n # TODO: turn off is_available test since this breaks tests\n #assert accel_obj.is_available(), \\\n # f'{accel_obj.__class__.__name__} accelerator fails is_available() test'\n\n\ndef get_accelerator():\n global ds_accelerator\n if ds_accelerator is not None:\n return ds_accelerator\n\n accelerator_name = None\n ds_set_method = None\n # 1. Detect whether there is override of DeepSpeed accelerators from environment variable.\n # DS_ACCELERATOR = 'cuda'|'xpu'|'cpu'\n if 'DS_ACCELERATOR' in os.environ.keys():\n accelerator_name = os.environ['DS_ACCELERATOR']\n if accelerator_name == 'xpu':\n try:\n from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401\n except ImportError as e:\n raise ValueError(\n f'XPU_Accelerator requires intel_extension_for_deepspeed, which is not installed on this system.')\n elif accelerator_name == 'cpu':\n try:\n import intel_extension_for_pytorch # noqa: F401\n except ImportError as e:\n raise ValueError(\n f'CPU_Accelerator requires intel_extension_for_pytorch, which is not installed on this system.')\n elif accelerator_name == 'cuda':\n pass\n else:\n raise ValueError(\n f'DS_ACCELERATOR must be one of \"cuda\", \"cpu\", or \"xpu\". Value \"{accelerator_name}\" is not supported')\n ds_set_method = 'override'\n\n # 2. 
If no override, detect which accelerator to use automatically\n if accelerator_name == None:\n try:\n from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401,F811\n accelerator_name = 'xpu'\n except ImportError as e:\n # We need a way to choose between CUDA_Accelerator and CPU_Accelerator\n # Currently we detect whether intel_extension_for_pytorch is installed\n # in the environment and use CPU_Accelerator if the answer is True.\n # An alternative might be detect whether CUDA device is installed on\n # the system but this comes with two pitfalls:\n # 1. the system may not have torch pre-installed, so\n # get_accelerator().is_available() may not work.\n # 2. Some scenario like install on login node (without CUDA device)\n # and run on compute node (with CUDA device) may cause mismatch\n # between installation time and runtime.\n try:\n import intel_extension_for_pytorch # noqa: F401,F811\n accelerator_name = 'cpu'\n except ImportError as e:\n accelerator_name = 'cuda'\n ds_set_method = 'auto detect'\n\n # 3. Set ds_accelerator accordingly\n if accelerator_name == 'cuda':\n from .cuda_accelerator import CUDA_Accelerator\n ds_accelerator = CUDA_Accelerator()\n elif accelerator_name == 'cpu':\n from .cpu_accelerator import CPU_Accelerator\n ds_accelerator = CPU_Accelerator()\n elif accelerator_name == 'xpu':\n # XPU_Accelerator is already imported in detection stage\n ds_accelerator = XPU_Accelerator()\n _validate_accelerator(ds_accelerator)\n print(f\"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})\")\n return ds_accelerator\n\n\ndef set_accelerator(accel_obj):\n global ds_accelerator\n _validate_accelerator(accel_obj)\n print(f\"Setting ds_accelerator to {accel_obj._name} (model specified)\")\n ds_accelerator = accel_obj\n\n\n'''\n-----------[code] test_get.py -----------\nfrom deepspeed.accelerator import get_accelerator\nmy_accelerator = get_accelerator()\nprint(f'{my_accelerator._name=}')\nprint(f'{my_accelerator._communication_backend=}')\nprint(f'{my_accelerator.HalfTensor().device=}')\nprint(f'{my_accelerator.total_memory()=}')\n-----------[code] test_get.py -----------\n\n---[output] python test_get.py---------\nmy_accelerator.name()='cuda'\nmy_accelerator.communication_backend='nccl'\nmy_accelerator.HalfTensor().device=device(type='cuda', index=0)\nmy_accelerator.total_memory()=34089730048\n---[output] python test_get.py---------\n\n**************************************************************************\n-----------[code] test_set.py -----------\nfrom deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator\ncu_accel = CUDA_Accelerator()\nprint(f'{id(cu_accel)=}')\nfrom deepspeed.accelerator import set_accelerator, get_accelerator\nset_accelerator(cu_accel)\n\nmy_accelerator = get_accelerator()\nprint(f'{id(my_accelerator)=}')\nprint(f'{my_accelerator._name=}')\nprint(f'{my_accelerator._communication_backend=}')\nprint(f'{my_accelerator.HalfTensor().device=}')\nprint(f'{my_accelerator.total_memory()=}')\n-----------[code] test_set.py -----------\n\n\n---[output] python test_set.py---------\nid(cu_accel)=139648165478304\nmy_accelerator=<deepspeed.accelerator.cuda_accelerator.CUDA_Accelerator object at 0x7f025f4bffa0>\nmy_accelerator.name='cuda'\nmy_accelerator.communication_backend='nccl'\nmy_accelerator.HalfTensor().device=device(type='cuda', index=0)\nmy_accelerator.total_memory()=34089730048\n---[output] python test_set.py---------\n'''\n", "path": "accelerator/real_accelerator.py"}], "after_files": [{"content": "# Copyright (c) Microsoft 
Corporation.\n# SPDX-License-Identifier: Apache-2.0\n\n# DeepSpeed Team\nimport os\n\ntry:\n # Importing logger currently requires that torch is installed, hence the try...except\n # TODO: Remove logger dependency on torch.\n from deepspeed.utils import logger as accel_logger\nexcept ImportError as e:\n accel_logger = None\n\ntry:\n from accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa1\nexcept ImportError as e:\n dsa1 = None\ntry:\n from deepspeed.accelerator.abstract_accelerator import DeepSpeedAccelerator as dsa2\nexcept ImportError as e:\n dsa2 = None\n\nds_accelerator = None\n\n\ndef _validate_accelerator(accel_obj):\n # because abstract_accelerator has different path during\n # build time (accelerator.abstract_accelerator)\n # and run time (deepspeed.accelerator.abstract_accelerator)\n # and extension would import the\n # run time abstract_accelerator/DeepSpeedAccelerator as its base\n # class, so we need to compare accel_obj with both base class.\n # if accel_obj is instance of DeepSpeedAccelerator in one of\n # accelerator.abstractor_accelerator\n # or deepspeed.accelerator.abstract_accelerator, consider accel_obj\n # is a conforming object\n if not ((dsa1 != None and isinstance(accel_obj, dsa1)) or (dsa2 != None and isinstance(accel_obj, dsa2))):\n raise AssertionError(f'{accel_obj.__class__.__name__} accelerator is not subclass of DeepSpeedAccelerator')\n\n # TODO: turn off is_available test since this breaks tests\n #assert accel_obj.is_available(), \\\n # f'{accel_obj.__class__.__name__} accelerator fails is_available() test'\n\n\ndef get_accelerator():\n global ds_accelerator\n if ds_accelerator is not None:\n return ds_accelerator\n\n accelerator_name = None\n ds_set_method = None\n # 1. Detect whether there is override of DeepSpeed accelerators from environment variable.\n # DS_ACCELERATOR = 'cuda'|'xpu'|'cpu'\n if 'DS_ACCELERATOR' in os.environ.keys():\n accelerator_name = os.environ['DS_ACCELERATOR']\n if accelerator_name == 'xpu':\n try:\n from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401\n except ImportError as e:\n raise ValueError(\n f'XPU_Accelerator requires intel_extension_for_deepspeed, which is not installed on this system.')\n elif accelerator_name == 'cpu':\n try:\n import intel_extension_for_pytorch # noqa: F401\n except ImportError as e:\n raise ValueError(\n f'CPU_Accelerator requires intel_extension_for_pytorch, which is not installed on this system.')\n elif accelerator_name == 'cuda':\n pass\n else:\n raise ValueError(\n f'DS_ACCELERATOR must be one of \"cuda\", \"cpu\", or \"xpu\". Value \"{accelerator_name}\" is not supported')\n ds_set_method = 'override'\n\n # 2. If no override, detect which accelerator to use automatically\n if accelerator_name == None:\n try:\n from intel_extension_for_deepspeed import XPU_Accelerator # noqa: F401,F811\n accelerator_name = 'xpu'\n except ImportError as e:\n # We need a way to choose between CUDA_Accelerator and CPU_Accelerator\n # Currently we detect whether intel_extension_for_pytorch is installed\n # in the environment and use CPU_Accelerator if the answer is True.\n # An alternative might be detect whether CUDA device is installed on\n # the system but this comes with two pitfalls:\n # 1. the system may not have torch pre-installed, so\n # get_accelerator().is_available() may not work.\n # 2. 
Some scenario like install on login node (without CUDA device)\n # and run on compute node (with CUDA device) may cause mismatch\n # between installation time and runtime.\n try:\n import intel_extension_for_pytorch # noqa: F401,F811\n accelerator_name = 'cpu'\n except ImportError as e:\n accelerator_name = 'cuda'\n ds_set_method = 'auto detect'\n\n # 3. Set ds_accelerator accordingly\n if accelerator_name == 'cuda':\n from .cuda_accelerator import CUDA_Accelerator\n ds_accelerator = CUDA_Accelerator()\n elif accelerator_name == 'cpu':\n from .cpu_accelerator import CPU_Accelerator\n ds_accelerator = CPU_Accelerator()\n elif accelerator_name == 'xpu':\n # XPU_Accelerator is already imported in detection stage\n ds_accelerator = XPU_Accelerator()\n _validate_accelerator(ds_accelerator)\n if accel_logger is not None:\n accel_logger.info(f\"Setting ds_accelerator to {ds_accelerator._name} ({ds_set_method})\")\n return ds_accelerator\n\n\ndef set_accelerator(accel_obj):\n global ds_accelerator\n _validate_accelerator(accel_obj)\n if accel_logger is not None:\n accel_logger.info(f\"Setting ds_accelerator to {accel_obj._name} (model specified)\")\n ds_accelerator = accel_obj\n\n\n'''\n-----------[code] test_get.py -----------\nfrom deepspeed.accelerator import get_accelerator\nmy_accelerator = get_accelerator()\nlogger.info(f'{my_accelerator._name=}')\nlogger.info(f'{my_accelerator._communication_backend=}')\nlogger.info(f'{my_accelerator.HalfTensor().device=}')\nlogger.info(f'{my_accelerator.total_memory()=}')\n-----------[code] test_get.py -----------\n\n---[output] python test_get.py---------\nmy_accelerator.name()='cuda'\nmy_accelerator.communication_backend='nccl'\nmy_accelerator.HalfTensor().device=device(type='cuda', index=0)\nmy_accelerator.total_memory()=34089730048\n---[output] python test_get.py---------\n\n**************************************************************************\n-----------[code] test_set.py -----------\nfrom deepspeed.accelerator.cuda_accelerator import CUDA_Accelerator\ncu_accel = CUDA_Accelerator()\nlogger.info(f'{id(cu_accel)=}')\nfrom deepspeed.accelerator import set_accelerator, get_accelerator\nset_accelerator(cu_accel)\n\nmy_accelerator = get_accelerator()\nlogger.info(f'{id(my_accelerator)=}')\nlogger.info(f'{my_accelerator._name=}')\nlogger.info(f'{my_accelerator._communication_backend=}')\nlogger.info(f'{my_accelerator.HalfTensor().device=}')\nlogger.info(f'{my_accelerator.total_memory()=}')\n-----------[code] test_set.py -----------\n\n\n---[output] python test_set.py---------\nid(cu_accel)=139648165478304\nmy_accelerator=<deepspeed.accelerator.cuda_accelerator.CUDA_Accelerator object at 0x7f025f4bffa0>\nmy_accelerator.name='cuda'\nmy_accelerator.communication_backend='nccl'\nmy_accelerator.HalfTensor().device=device(type='cuda', index=0)\nmy_accelerator.total_memory()=34089730048\n---[output] python test_set.py---------\n'''\n", "path": "accelerator/real_accelerator.py"}]} | 2,551 | 729 |
gh_patches_debug_28287 | rasdani/github-patches | git_diff | gratipay__gratipay.com-3904 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Speed up tip migration script
Reticketed from https://github.com/gratipay/inside.gratipay.com/issues/468#issuecomment-171707621
When we wrote this script, we thought that we'd only be using it for a month or two. Now that it's here to stay - time to work on it a bit :)
The script takes so long because we're pulling in _all_ approved teams. We could place a check in the script so that we only pull in those teams that satisfy the following criteria (a rough SQL sketch of this selection follows the issue text):
1) The team owner must have one or more tips from Gratipay 1.0
2) The team must have zero `payment_instructions` whose `ctime` is earlier than the team's `ctime` (such rows are migrated tips). We use the `ctime` attribute to differentiate `payment_instructions` created as a result of migrated tips from those created directly in Gratipay 2.0
https://github.com/gratipay/gratipay.com/blob/master/bin/migrate-tips.py#L6-L10
--- END ISSUE ---
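The two criteria map directly onto the tables used in the files below (`teams`, `tips`, `payment_instructions`). As a rough sketch of that reasoning only (not the repository's query; column names are taken from the model code below):

```python
# Illustrative sketch of the selection described in the issue; the query is hypothetical.
CANDIDATE_TEAMS_SQL = """
    SELECT t.*
      FROM teams t
     WHERE t.is_approved IS TRUE
       -- criterion 1: the owner received tips under Gratipay 1.0
       AND EXISTS (SELECT 1 FROM tips WHERE tippee = t.owner)
       -- criterion 2: no payment_instructions predate the team,
       -- i.e. its tips have not been migrated yet
       AND NOT EXISTS (
               SELECT 1
                 FROM payment_instructions pi
                WHERE pi.team = t.slug
                  AND pi.ctime < t.ctime
           )
"""
```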
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gratipay/models/team.py`
Content:
```
1 """Teams on Gratipay receive payments and distribute payroll.
2 """
3 import requests
4 from aspen import json, log
5 from gratipay.models import add_event
6 from postgres.orm import Model
7
8
9 class Team(Model):
10 """Represent a Gratipay team.
11 """
12
13 typname = 'teams'
14
15 def __eq__(self, other):
16 if not isinstance(other, Team):
17 return False
18 return self.id == other.id
19
20 def __ne__(self, other):
21 if not isinstance(other, Team):
22 return True
23 return self.id != other.id
24
25
26 # Constructors
27 # ============
28
29 @classmethod
30 def from_id(cls, id):
31 """Return an existing team based on id.
32 """
33 return cls._from_thing("id", id)
34
35 @classmethod
36 def from_slug(cls, slug):
37 """Return an existing team based on slug.
38 """
39 return cls._from_thing("slug_lower", slug.lower())
40
41 @classmethod
42 def _from_thing(cls, thing, value):
43 assert thing in ("id", "slug_lower")
44 return cls.db.one("""
45
46 SELECT teams.*::teams
47 FROM teams
48 WHERE {}=%s
49
50 """.format(thing), (value,))
51
52 @classmethod
53 def insert(cls, owner, **fields):
54 fields['slug_lower'] = fields['slug'].lower()
55 fields['owner'] = owner.username
56 return cls.db.one("""
57
58 INSERT INTO teams
59 (slug, slug_lower, name, homepage,
60 product_or_service, todo_url, onboarding_url,
61 owner)
62 VALUES (%(slug)s, %(slug_lower)s, %(name)s, %(homepage)s,
63 %(product_or_service)s, %(todo_url)s, %(onboarding_url)s,
64 %(owner)s)
65 RETURNING teams.*::teams
66
67 """, fields)
68
69
70 def create_github_review_issue(self):
71 """POST to GitHub, and return the URL of the new issue.
72 """
73 api_url = "https://api.github.com/repos/{}/issues".format(self.review_repo)
74 data = json.dumps({ "title": self.name
75 , "body": "https://gratipay.com/{}/\n\n".format(self.slug) +
76 "(This application will remain open for at least a week.)"
77 })
78 out = ''
79 try:
80 r = requests.post(api_url, auth=self.review_auth, data=data)
81 if r.status_code == 201:
82 out = r.json()['html_url']
83 else:
84 log(r.status_code)
85 log(r.text)
86 err = str(r.status_code)
87 except:
88 err = "eep"
89 if not out:
90 out = "https://github.com/gratipay/team-review/issues#error-{}".format(err)
91 return out
92
93
94 def set_review_url(self, review_url):
95 self.db.run("UPDATE teams SET review_url=%s WHERE id=%s", (review_url, self.id))
96 self.set_attributes(review_url=review_url)
97
98
99 def get_og_title(self):
100 out = self.name
101 receiving = self.receiving
102 if receiving > 0:
103 out += " receives $%.2f/wk" % receiving
104 else:
105 out += " is"
106 return out + " on Gratipay"
107
108
109 def update_receiving(self, cursor=None):
110 r = (cursor or self.db).one("""
111 WITH our_receiving AS (
112 SELECT amount
113 FROM current_payment_instructions
114 JOIN participants p ON p.username = participant
115 WHERE team = %(slug)s
116 AND p.is_suspicious IS NOT true
117 AND amount > 0
118 AND is_funded
119 )
120 UPDATE teams t
121 SET receiving = COALESCE((SELECT sum(amount) FROM our_receiving), 0)
122 , nreceiving_from = COALESCE((SELECT count(*) FROM our_receiving), 0)
123 , distributing = COALESCE((SELECT sum(amount) FROM our_receiving), 0)
124 , ndistributing_to = 1
125 WHERE t.slug = %(slug)s
126 RETURNING receiving, nreceiving_from, distributing, ndistributing_to
127 """, dict(slug=self.slug))
128
129
130 # This next step is easy for now since we don't have payroll.
131 from gratipay.models.participant import Participant
132 Participant.from_username(self.owner).update_taking(cursor or self.db)
133
134 self.set_attributes( receiving=r.receiving
135 , nreceiving_from=r.nreceiving_from
136 , distributing=r.distributing
137 , ndistributing_to=r.ndistributing_to
138 )
139
140 @property
141 def status(self):
142 return { None: 'unreviewed'
143 , False: 'rejected'
144 , True: 'approved'
145 }[self.is_approved]
146
147 def to_dict(self):
148 return {
149 'homepage': self.homepage,
150 'name': self.name,
151 'nreceiving_from': self.nreceiving_from,
152 'onboarding_url': self.onboarding_url,
153 'owner': '~' + self.owner,
154 'receiving': self.receiving,
155 'slug': self.slug,
156 'status': self.status,
157 'todo_url': self.todo_url
158 }
159
160 def migrate_tips(self):
161 payment_instructions = self.db.all("""
162 SELECT pi.*
163 FROM payment_instructions pi
164 JOIN teams t ON t.slug = pi.team
165 JOIN participants p ON t.owner = p.username
166 WHERE p.username = %s
167 AND pi.ctime < t.ctime
168 """, (self.owner, ))
169
170 # Make sure the migration hasn't been done already
171 if payment_instructions:
172 raise AlreadyMigrated
173
174 return self.db.one("""
175 WITH rows AS (
176
177 INSERT INTO payment_instructions
178 (ctime, mtime, participant, team, amount, is_funded)
179 SELECT ct.ctime
180 , ct.mtime
181 , ct.tipper
182 , %(slug)s
183 , ct.amount
184 , ct.is_funded
185 FROM current_tips ct
186 JOIN participants p ON p.username = tipper
187 WHERE ct.tippee=%(owner)s
188 AND p.claimed_time IS NOT NULL
189 AND p.is_suspicious IS NOT TRUE
190 AND p.is_closed IS NOT TRUE
191 RETURNING 1
192
193 ) SELECT count(*) FROM rows;
194 """, {'slug': self.slug, 'owner': self.owner})
195
196
197 # Images
198 # ======
199
200 IMAGE_SIZES = ('original', 'large', 'small')
201
202 def get_image_url(self, size):
203 assert size in ('original', 'large', 'small'), size
204 return '/{}/image?size={}'.format(self.slug, size)
205
206 def save_image(self, original, large, small, image_type):
207 with self.db.get_cursor() as c:
208 oids = {}
209 for size in self.IMAGE_SIZES:
210 lobject = c.connection.lobject(getattr(self, 'image_oid_'+size), mode='wb')
211 lobject.write(locals()[size])
212 oids[size] = lobject.oid
213 lobject.close()
214
215 c.run("""UPDATE teams
216 SET image_oid_original=%s, image_oid_large=%s, image_oid_small=%s
217 , image_type=%s
218 WHERE id=%s"""
219 , (oids['original'], oids['large'], oids['small'], image_type, self.id)
220 )
221 add_event(c, 'team', dict( action='upsert_image'
222 , id=self.id
223 , **oids
224 ))
225 self.set_attributes( image_type=image_type
226 , **{'image_oid_'+size: oids[size] for size in oids}
227 )
228 return oids
229
230 def load_image(self, size):
231 assert size in self.IMAGE_SIZES, size
232 image = None
233 oid = getattr(self, 'image_oid_{}'.format(size))
234 if oid != 0:
235 with self.db.get_connection() as c:
236 image = c.lobject(oid, mode='rb').read()
237 return image
238
239
240 class AlreadyMigrated(Exception): pass
241
```
Path: `bin/migrate-tips.py`
Content:
```
1 from gratipay.wireup import db, env
2 from gratipay.models.team import Team, AlreadyMigrated
3
4 db = db(env())
5
6 slugs = db.all("""
7 SELECT slug
8 FROM teams
9 WHERE is_approved IS TRUE
10 """)
11
12 for slug in slugs:
13 team = Team.from_slug(slug)
14 try:
15 ntips = team.migrate_tips()
16 print("Migrated {} tip(s) for '{}'".format(ntips, slug))
17 except AlreadyMigrated:
18 print("'%s' already migrated." % slug)
19
20 print("Done.")
21
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bin/migrate-tips.py b/bin/migrate-tips.py
--- a/bin/migrate-tips.py
+++ b/bin/migrate-tips.py
@@ -1,20 +1,27 @@
from gratipay.wireup import db, env
-from gratipay.models.team import Team, AlreadyMigrated
+from gratipay.models.team import AlreadyMigrated
db = db(env())
-slugs = db.all("""
- SELECT slug
- FROM teams
- WHERE is_approved IS TRUE
+teams = db.all("""
+ SELECT distinct ON (t.slug) t.*::teams
+ FROM teams t
+ JOIN tips ON t.owner = tips.tippee -- Only fetch teams whose owners had tips under Gratipay 1.0
+ WHERE t.is_approved IS TRUE -- Only fetch approved teams
+ AND NOT EXISTS ( -- Make sure tips haven't been migrated for any teams with same owner
+ SELECT 1
+ FROM payment_instructions pi
+ JOIN teams t2 ON t2.slug = pi.team
+ WHERE t2.owner = t.owner
+ AND pi.ctime < t2.ctime
+ )
""")
-for slug in slugs:
- team = Team.from_slug(slug)
+for team in teams:
try:
ntips = team.migrate_tips()
- print("Migrated {} tip(s) for '{}'".format(ntips, slug))
+ print("Migrated {} tip(s) for '{}'".format(ntips, team.slug))
except AlreadyMigrated:
- print("'%s' already migrated." % slug)
+ print("'%s' already migrated." % team.slug)
print("Done.")
diff --git a/gratipay/models/team.py b/gratipay/models/team.py
--- a/gratipay/models/team.py
+++ b/gratipay/models/team.py
@@ -162,8 +162,7 @@
SELECT pi.*
FROM payment_instructions pi
JOIN teams t ON t.slug = pi.team
- JOIN participants p ON t.owner = p.username
- WHERE p.username = %s
+ WHERE t.owner = %s
AND pi.ctime < t.ctime
""", (self.owner, ))
| {"golden_diff": "diff --git a/bin/migrate-tips.py b/bin/migrate-tips.py\n--- a/bin/migrate-tips.py\n+++ b/bin/migrate-tips.py\n@@ -1,20 +1,27 @@\n from gratipay.wireup import db, env\n-from gratipay.models.team import Team, AlreadyMigrated\n+from gratipay.models.team import AlreadyMigrated\n \n db = db(env())\n \n-slugs = db.all(\"\"\"\n- SELECT slug\n- FROM teams\n- WHERE is_approved IS TRUE\n+teams = db.all(\"\"\"\n+ SELECT distinct ON (t.slug) t.*::teams\n+ FROM teams t\n+ JOIN tips ON t.owner = tips.tippee -- Only fetch teams whose owners had tips under Gratipay 1.0\n+ WHERE t.is_approved IS TRUE -- Only fetch approved teams\n+ AND NOT EXISTS ( -- Make sure tips haven't been migrated for any teams with same owner\n+ SELECT 1\n+ FROM payment_instructions pi\n+ JOIN teams t2 ON t2.slug = pi.team\n+ WHERE t2.owner = t.owner\n+ AND pi.ctime < t2.ctime\n+ )\n \"\"\")\n \n-for slug in slugs:\n- team = Team.from_slug(slug)\n+for team in teams:\n try:\n ntips = team.migrate_tips()\n- print(\"Migrated {} tip(s) for '{}'\".format(ntips, slug))\n+ print(\"Migrated {} tip(s) for '{}'\".format(ntips, team.slug))\n except AlreadyMigrated:\n- print(\"'%s' already migrated.\" % slug)\n+ print(\"'%s' already migrated.\" % team.slug)\n \n print(\"Done.\")\ndiff --git a/gratipay/models/team.py b/gratipay/models/team.py\n--- a/gratipay/models/team.py\n+++ b/gratipay/models/team.py\n@@ -162,8 +162,7 @@\n SELECT pi.*\n FROM payment_instructions pi\n JOIN teams t ON t.slug = pi.team\n- JOIN participants p ON t.owner = p.username\n- WHERE p.username = %s\n+ WHERE t.owner = %s\n AND pi.ctime < t.ctime\n \"\"\", (self.owner, ))\n", "issue": "Speed up tip migration script\nReticketed from https://github.com/gratipay/inside.gratipay.com/issues/468#issuecomment-171707621\n\nWhen we wrote this script, we thought that we'd only be using it for a month or two. Now that it's here to stay - time to work on it a bit :) \n\nThe script takes so long because we're pulling in _all_ approved teams. We could place a check in the script so that we only pull in those teams that satisfy the following criteria - \n\n1) Team owner must have 0+ tips from Gratipay 1.0\n2) Team must have zero `payment_instructions` that have a `ctime` lesser than the team's `ctime` (i.e. migrated tips). 
We use the `ctime` attribute to differentiate `payment_instructions` that were created as a result of migrated tips vs created in Gratipay 2.0\n\nhttps://github.com/gratipay/gratipay.com/blob/master/bin/migrate-tips.py#L6-L10\n\n", "before_files": [{"content": "\"\"\"Teams on Gratipay receive payments and distribute payroll.\n\"\"\"\nimport requests\nfrom aspen import json, log\nfrom gratipay.models import add_event\nfrom postgres.orm import Model\n\n\nclass Team(Model):\n \"\"\"Represent a Gratipay team.\n \"\"\"\n\n typname = 'teams'\n\n def __eq__(self, other):\n if not isinstance(other, Team):\n return False\n return self.id == other.id\n\n def __ne__(self, other):\n if not isinstance(other, Team):\n return True\n return self.id != other.id\n\n\n # Constructors\n # ============\n\n @classmethod\n def from_id(cls, id):\n \"\"\"Return an existing team based on id.\n \"\"\"\n return cls._from_thing(\"id\", id)\n\n @classmethod\n def from_slug(cls, slug):\n \"\"\"Return an existing team based on slug.\n \"\"\"\n return cls._from_thing(\"slug_lower\", slug.lower())\n\n @classmethod\n def _from_thing(cls, thing, value):\n assert thing in (\"id\", \"slug_lower\")\n return cls.db.one(\"\"\"\n\n SELECT teams.*::teams\n FROM teams\n WHERE {}=%s\n\n \"\"\".format(thing), (value,))\n\n @classmethod\n def insert(cls, owner, **fields):\n fields['slug_lower'] = fields['slug'].lower()\n fields['owner'] = owner.username\n return cls.db.one(\"\"\"\n\n INSERT INTO teams\n (slug, slug_lower, name, homepage,\n product_or_service, todo_url, onboarding_url,\n owner)\n VALUES (%(slug)s, %(slug_lower)s, %(name)s, %(homepage)s,\n %(product_or_service)s, %(todo_url)s, %(onboarding_url)s,\n %(owner)s)\n RETURNING teams.*::teams\n\n \"\"\", fields)\n\n\n def create_github_review_issue(self):\n \"\"\"POST to GitHub, and return the URL of the new issue.\n \"\"\"\n api_url = \"https://api.github.com/repos/{}/issues\".format(self.review_repo)\n data = json.dumps({ \"title\": self.name\n , \"body\": \"https://gratipay.com/{}/\\n\\n\".format(self.slug) +\n \"(This application will remain open for at least a week.)\"\n })\n out = ''\n try:\n r = requests.post(api_url, auth=self.review_auth, data=data)\n if r.status_code == 201:\n out = r.json()['html_url']\n else:\n log(r.status_code)\n log(r.text)\n err = str(r.status_code)\n except:\n err = \"eep\"\n if not out:\n out = \"https://github.com/gratipay/team-review/issues#error-{}\".format(err)\n return out\n\n\n def set_review_url(self, review_url):\n self.db.run(\"UPDATE teams SET review_url=%s WHERE id=%s\", (review_url, self.id))\n self.set_attributes(review_url=review_url)\n\n\n def get_og_title(self):\n out = self.name\n receiving = self.receiving\n if receiving > 0:\n out += \" receives $%.2f/wk\" % receiving\n else:\n out += \" is\"\n return out + \" on Gratipay\"\n\n\n def update_receiving(self, cursor=None):\n r = (cursor or self.db).one(\"\"\"\n WITH our_receiving AS (\n SELECT amount\n FROM current_payment_instructions\n JOIN participants p ON p.username = participant\n WHERE team = %(slug)s\n AND p.is_suspicious IS NOT true\n AND amount > 0\n AND is_funded\n )\n UPDATE teams t\n SET receiving = COALESCE((SELECT sum(amount) FROM our_receiving), 0)\n , nreceiving_from = COALESCE((SELECT count(*) FROM our_receiving), 0)\n , distributing = COALESCE((SELECT sum(amount) FROM our_receiving), 0)\n , ndistributing_to = 1\n WHERE t.slug = %(slug)s\n RETURNING receiving, nreceiving_from, distributing, ndistributing_to\n \"\"\", dict(slug=self.slug))\n\n\n # This next 
step is easy for now since we don't have payroll.\n from gratipay.models.participant import Participant\n Participant.from_username(self.owner).update_taking(cursor or self.db)\n\n self.set_attributes( receiving=r.receiving\n , nreceiving_from=r.nreceiving_from\n , distributing=r.distributing\n , ndistributing_to=r.ndistributing_to\n )\n\n @property\n def status(self):\n return { None: 'unreviewed'\n , False: 'rejected'\n , True: 'approved'\n }[self.is_approved]\n\n def to_dict(self):\n return {\n 'homepage': self.homepage,\n 'name': self.name,\n 'nreceiving_from': self.nreceiving_from,\n 'onboarding_url': self.onboarding_url,\n 'owner': '~' + self.owner,\n 'receiving': self.receiving,\n 'slug': self.slug,\n 'status': self.status,\n 'todo_url': self.todo_url\n }\n\n def migrate_tips(self):\n payment_instructions = self.db.all(\"\"\"\n SELECT pi.*\n FROM payment_instructions pi\n JOIN teams t ON t.slug = pi.team\n JOIN participants p ON t.owner = p.username\n WHERE p.username = %s\n AND pi.ctime < t.ctime\n \"\"\", (self.owner, ))\n\n # Make sure the migration hasn't been done already\n if payment_instructions:\n raise AlreadyMigrated\n\n return self.db.one(\"\"\"\n WITH rows AS (\n\n INSERT INTO payment_instructions\n (ctime, mtime, participant, team, amount, is_funded)\n SELECT ct.ctime\n , ct.mtime\n , ct.tipper\n , %(slug)s\n , ct.amount\n , ct.is_funded\n FROM current_tips ct\n JOIN participants p ON p.username = tipper\n WHERE ct.tippee=%(owner)s\n AND p.claimed_time IS NOT NULL\n AND p.is_suspicious IS NOT TRUE\n AND p.is_closed IS NOT TRUE\n RETURNING 1\n\n ) SELECT count(*) FROM rows;\n \"\"\", {'slug': self.slug, 'owner': self.owner})\n\n\n # Images\n # ======\n\n IMAGE_SIZES = ('original', 'large', 'small')\n\n def get_image_url(self, size):\n assert size in ('original', 'large', 'small'), size\n return '/{}/image?size={}'.format(self.slug, size)\n\n def save_image(self, original, large, small, image_type):\n with self.db.get_cursor() as c:\n oids = {}\n for size in self.IMAGE_SIZES:\n lobject = c.connection.lobject(getattr(self, 'image_oid_'+size), mode='wb')\n lobject.write(locals()[size])\n oids[size] = lobject.oid\n lobject.close()\n\n c.run(\"\"\"UPDATE teams\n SET image_oid_original=%s, image_oid_large=%s, image_oid_small=%s\n , image_type=%s\n WHERE id=%s\"\"\"\n , (oids['original'], oids['large'], oids['small'], image_type, self.id)\n )\n add_event(c, 'team', dict( action='upsert_image'\n , id=self.id\n , **oids\n ))\n self.set_attributes( image_type=image_type\n , **{'image_oid_'+size: oids[size] for size in oids}\n )\n return oids\n\n def load_image(self, size):\n assert size in self.IMAGE_SIZES, size\n image = None\n oid = getattr(self, 'image_oid_{}'.format(size))\n if oid != 0:\n with self.db.get_connection() as c:\n image = c.lobject(oid, mode='rb').read()\n return image\n\n\nclass AlreadyMigrated(Exception): pass\n", "path": "gratipay/models/team.py"}, {"content": "from gratipay.wireup import db, env\nfrom gratipay.models.team import Team, AlreadyMigrated\n\ndb = db(env())\n\nslugs = db.all(\"\"\"\n SELECT slug\n FROM teams\n WHERE is_approved IS TRUE\n\"\"\")\n\nfor slug in slugs:\n team = Team.from_slug(slug)\n try:\n ntips = team.migrate_tips()\n print(\"Migrated {} tip(s) for '{}'\".format(ntips, slug))\n except AlreadyMigrated:\n print(\"'%s' already migrated.\" % slug)\n\nprint(\"Done.\")\n", "path": "bin/migrate-tips.py"}], "after_files": [{"content": "\"\"\"Teams on Gratipay receive payments and distribute payroll.\n\"\"\"\nimport requests\nfrom aspen 
import json, log\nfrom gratipay.models import add_event\nfrom postgres.orm import Model\n\n\nclass Team(Model):\n \"\"\"Represent a Gratipay team.\n \"\"\"\n\n typname = 'teams'\n\n def __eq__(self, other):\n if not isinstance(other, Team):\n return False\n return self.id == other.id\n\n def __ne__(self, other):\n if not isinstance(other, Team):\n return True\n return self.id != other.id\n\n\n # Constructors\n # ============\n\n @classmethod\n def from_id(cls, id):\n \"\"\"Return an existing team based on id.\n \"\"\"\n return cls._from_thing(\"id\", id)\n\n @classmethod\n def from_slug(cls, slug):\n \"\"\"Return an existing team based on slug.\n \"\"\"\n return cls._from_thing(\"slug_lower\", slug.lower())\n\n @classmethod\n def _from_thing(cls, thing, value):\n assert thing in (\"id\", \"slug_lower\")\n return cls.db.one(\"\"\"\n\n SELECT teams.*::teams\n FROM teams\n WHERE {}=%s\n\n \"\"\".format(thing), (value,))\n\n @classmethod\n def insert(cls, owner, **fields):\n fields['slug_lower'] = fields['slug'].lower()\n fields['owner'] = owner.username\n return cls.db.one(\"\"\"\n\n INSERT INTO teams\n (slug, slug_lower, name, homepage,\n product_or_service, todo_url, onboarding_url,\n owner)\n VALUES (%(slug)s, %(slug_lower)s, %(name)s, %(homepage)s,\n %(product_or_service)s, %(todo_url)s, %(onboarding_url)s,\n %(owner)s)\n RETURNING teams.*::teams\n\n \"\"\", fields)\n\n\n def create_github_review_issue(self):\n \"\"\"POST to GitHub, and return the URL of the new issue.\n \"\"\"\n api_url = \"https://api.github.com/repos/{}/issues\".format(self.review_repo)\n data = json.dumps({ \"title\": self.name\n , \"body\": \"https://gratipay.com/{}/\\n\\n\".format(self.slug) +\n \"(This application will remain open for at least a week.)\"\n })\n out = ''\n try:\n r = requests.post(api_url, auth=self.review_auth, data=data)\n if r.status_code == 201:\n out = r.json()['html_url']\n else:\n log(r.status_code)\n log(r.text)\n err = str(r.status_code)\n except:\n err = \"eep\"\n if not out:\n out = \"https://github.com/gratipay/team-review/issues#error-{}\".format(err)\n return out\n\n\n def set_review_url(self, review_url):\n self.db.run(\"UPDATE teams SET review_url=%s WHERE id=%s\", (review_url, self.id))\n self.set_attributes(review_url=review_url)\n\n\n def get_og_title(self):\n out = self.name\n receiving = self.receiving\n if receiving > 0:\n out += \" receives $%.2f/wk\" % receiving\n else:\n out += \" is\"\n return out + \" on Gratipay\"\n\n\n def update_receiving(self, cursor=None):\n r = (cursor or self.db).one(\"\"\"\n WITH our_receiving AS (\n SELECT amount\n FROM current_payment_instructions\n JOIN participants p ON p.username = participant\n WHERE team = %(slug)s\n AND p.is_suspicious IS NOT true\n AND amount > 0\n AND is_funded\n )\n UPDATE teams t\n SET receiving = COALESCE((SELECT sum(amount) FROM our_receiving), 0)\n , nreceiving_from = COALESCE((SELECT count(*) FROM our_receiving), 0)\n , distributing = COALESCE((SELECT sum(amount) FROM our_receiving), 0)\n , ndistributing_to = 1\n WHERE t.slug = %(slug)s\n RETURNING receiving, nreceiving_from, distributing, ndistributing_to\n \"\"\", dict(slug=self.slug))\n\n\n # This next step is easy for now since we don't have payroll.\n from gratipay.models.participant import Participant\n Participant.from_username(self.owner).update_taking(cursor or self.db)\n\n self.set_attributes( receiving=r.receiving\n , nreceiving_from=r.nreceiving_from\n , distributing=r.distributing\n , ndistributing_to=r.ndistributing_to\n )\n\n @property\n def 
status(self):\n return { None: 'unreviewed'\n , False: 'rejected'\n , True: 'approved'\n }[self.is_approved]\n\n def to_dict(self):\n return {\n 'homepage': self.homepage,\n 'name': self.name,\n 'nreceiving_from': self.nreceiving_from,\n 'onboarding_url': self.onboarding_url,\n 'owner': '~' + self.owner,\n 'receiving': self.receiving,\n 'slug': self.slug,\n 'status': self.status,\n 'todo_url': self.todo_url\n }\n\n def migrate_tips(self):\n payment_instructions = self.db.all(\"\"\"\n SELECT pi.*\n FROM payment_instructions pi\n JOIN teams t ON t.slug = pi.team\n WHERE t.owner = %s\n AND pi.ctime < t.ctime\n \"\"\", (self.owner, ))\n\n # Make sure the migration hasn't been done already\n if payment_instructions:\n raise AlreadyMigrated\n\n return self.db.one(\"\"\"\n WITH rows AS (\n\n INSERT INTO payment_instructions\n (ctime, mtime, participant, team, amount, is_funded)\n SELECT ct.ctime\n , ct.mtime\n , ct.tipper\n , %(slug)s\n , ct.amount\n , ct.is_funded\n FROM current_tips ct\n JOIN participants p ON p.username = tipper\n WHERE ct.tippee=%(owner)s\n AND p.claimed_time IS NOT NULL\n AND p.is_suspicious IS NOT TRUE\n AND p.is_closed IS NOT TRUE\n RETURNING 1\n\n ) SELECT count(*) FROM rows;\n \"\"\", {'slug': self.slug, 'owner': self.owner})\n\n\n # Images\n # ======\n\n IMAGE_SIZES = ('original', 'large', 'small')\n\n def get_image_url(self, size):\n assert size in ('original', 'large', 'small'), size\n return '/{}/image?size={}'.format(self.slug, size)\n\n def save_image(self, original, large, small, image_type):\n with self.db.get_cursor() as c:\n oids = {}\n for size in self.IMAGE_SIZES:\n lobject = c.connection.lobject(getattr(self, 'image_oid_'+size), mode='wb')\n lobject.write(locals()[size])\n oids[size] = lobject.oid\n lobject.close()\n\n c.run(\"\"\"UPDATE teams\n SET image_oid_original=%s, image_oid_large=%s, image_oid_small=%s\n , image_type=%s\n WHERE id=%s\"\"\"\n , (oids['original'], oids['large'], oids['small'], image_type, self.id)\n )\n add_event(c, 'team', dict( action='upsert_image'\n , id=self.id\n , **oids\n ))\n self.set_attributes( image_type=image_type\n , **{'image_oid_'+size: oids[size] for size in oids}\n )\n return oids\n\n def load_image(self, size):\n assert size in self.IMAGE_SIZES, size\n image = None\n oid = getattr(self, 'image_oid_{}'.format(size))\n if oid != 0:\n with self.db.get_connection() as c:\n image = c.lobject(oid, mode='rb').read()\n return image\n\n\nclass AlreadyMigrated(Exception): pass\n", "path": "gratipay/models/team.py"}, {"content": "from gratipay.wireup import db, env\nfrom gratipay.models.team import AlreadyMigrated\n\ndb = db(env())\n\nteams = db.all(\"\"\"\n SELECT distinct ON (t.slug) t.*::teams\n FROM teams t\n JOIN tips ON t.owner = tips.tippee -- Only fetch teams whose owners had tips under Gratipay 1.0\n WHERE t.is_approved IS TRUE -- Only fetch approved teams\n AND NOT EXISTS ( -- Make sure tips haven't been migrated for any teams with same owner\n SELECT 1\n FROM payment_instructions pi\n JOIN teams t2 ON t2.slug = pi.team\n WHERE t2.owner = t.owner\n AND pi.ctime < t2.ctime\n )\n\"\"\")\n\nfor team in teams:\n try:\n ntips = team.migrate_tips()\n print(\"Migrated {} tip(s) for '{}'\".format(ntips, team.slug))\n except AlreadyMigrated:\n print(\"'%s' already migrated.\" % team.slug)\n\nprint(\"Done.\")\n", "path": "bin/migrate-tips.py"}]} | 3,028 | 488 |
gh_patches_debug_28750 | rasdani/github-patches | git_diff | mkdocs__mkdocs-2478 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Allow mkdocs.yaml config file (.yaml extension)
Even though `.yml` is commonly used, `.yaml` is actually preferred:
https://yaml.org/faq.html
> Is there an official extension for YAML files?
>
> Please use ".yaml" when possible.
It would therefore be great if you would allow `mkdocs.yaml`.
Somewhat related to #2164
Allow mkdocs.yaml when '--config' is not passed
We wanted to give our colleagues the possibility to build docs automatically when they put a `mkdocs.yml` in the docs folder of their repository. But it often happened that they used `.yaml` instead of `.yml`.
When this PR is accepted, it will first look for `mkdocs.yml` when `--config` is not provided. If that file is not present, it will try to find `mkdocs.yaml` (a minimal sketch of this fallback follows the issue text).
When neither file is present, it behaves like before.
When both are present, it behaves like before.
If only the `.yaml` version is present, that one will be used as the config file.
--- END ISSUE ---
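The requested behaviour reduces to trying a short list of default filenames when no `--config` is given. A minimal standalone sketch of that idea (not MkDocs code; the helper name and its details are made up for illustration):

```python
import os

DEFAULT_CONFIG_NAMES = ("mkdocs.yml", "mkdocs.yaml")  # .yml keeps priority

def find_default_config(directory="."):
    """Return the first default config file that exists in `directory`, or None."""
    for name in DEFAULT_CONFIG_NAMES:
        path = os.path.abspath(os.path.join(directory, name))
        if os.path.isfile(path):
            return path
    return None
```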
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/config/base.py`
Content:
```
1 import logging
2 import os
3 import sys
4 from yaml import YAMLError
5 from collections import UserDict
6 from contextlib import contextmanager
7
8 from mkdocs import exceptions
9 from mkdocs import utils
10
11
12 log = logging.getLogger('mkdocs.config')
13
14
15 class ValidationError(Exception):
16 """Raised during the validation process of the config on errors."""
17
18
19 class Config(UserDict):
20 """
21 MkDocs Configuration dict
22
23 This is a fairly simple extension of a standard dictionary. It adds methods
24 for running validation on the structure and contents.
25 """
26
27 def __init__(self, schema, config_file_path=None):
28 """
29 The schema is a Python dict which maps the config name to a validator.
30 """
31
32 self._schema = schema
33 self._schema_keys = set(dict(schema).keys())
34 # Ensure config_file_path is a Unicode string
35 if config_file_path is not None and not isinstance(config_file_path, str):
36 try:
37 # Assume config_file_path is encoded with the file system encoding.
38 config_file_path = config_file_path.decode(encoding=sys.getfilesystemencoding())
39 except UnicodeDecodeError:
40 raise ValidationError("config_file_path is not a Unicode string.")
41 self.config_file_path = config_file_path
42 self.data = {}
43
44 self.user_configs = []
45 self.set_defaults()
46
47 def set_defaults(self):
48 """
49 Set the base config by going through each validator and getting the
50 default if it has one.
51 """
52
53 for key, config_option in self._schema:
54 self[key] = config_option.default
55
56 def _validate(self):
57
58 failed, warnings = [], []
59
60 for key, config_option in self._schema:
61 try:
62 value = self.get(key)
63 self[key] = config_option.validate(value)
64 warnings.extend([(key, w) for w in config_option.warnings])
65 config_option.reset_warnings()
66 except ValidationError as e:
67 failed.append((key, e))
68
69 for key in (set(self.keys()) - self._schema_keys):
70 warnings.append((
71 key, f"Unrecognised configuration name: {key}"
72 ))
73
74 return failed, warnings
75
76 def _pre_validate(self):
77
78 failed, warnings = [], []
79
80 for key, config_option in self._schema:
81 try:
82 config_option.pre_validation(self, key_name=key)
83 warnings.extend([(key, w) for w in config_option.warnings])
84 config_option.reset_warnings()
85 except ValidationError as e:
86 failed.append((key, e))
87
88 return failed, warnings
89
90 def _post_validate(self):
91
92 failed, warnings = [], []
93
94 for key, config_option in self._schema:
95 try:
96 config_option.post_validation(self, key_name=key)
97 warnings.extend([(key, w) for w in config_option.warnings])
98 config_option.reset_warnings()
99 except ValidationError as e:
100 failed.append((key, e))
101
102 return failed, warnings
103
104 def validate(self):
105
106 failed, warnings = self._pre_validate()
107
108 run_failed, run_warnings = self._validate()
109
110 failed.extend(run_failed)
111 warnings.extend(run_warnings)
112
113 # Only run the post validation steps if there are no failures, warnings
114 # are okay.
115 if len(failed) == 0:
116 post_failed, post_warnings = self._post_validate()
117 failed.extend(post_failed)
118 warnings.extend(post_warnings)
119
120 return failed, warnings
121
122 def load_dict(self, patch):
123 """ Load config options from a dictionary. """
124
125 if not isinstance(patch, dict):
126 raise exceptions.ConfigurationError(
127 "The configuration is invalid. The expected type was a key "
128 "value mapping (a python dict) but we got an object of type: "
129 "{}".format(type(patch)))
130
131 self.user_configs.append(patch)
132 self.data.update(patch)
133
134 def load_file(self, config_file):
135 """ Load config options from the open file descriptor of a YAML file. """
136 try:
137 return self.load_dict(utils.yaml_load(config_file))
138 except YAMLError as e:
139 # MkDocs knows and understands ConfigurationErrors
140 raise exceptions.ConfigurationError(
141 f"MkDocs encountered an error parsing the configuration file: {e}"
142 )
143
144
145 @contextmanager
146 def _open_config_file(config_file):
147 """
148 A context manager which yields an open file descriptor ready to be read.
149
150 Accepts a filename as a string, an open or closed file descriptor, or None.
151 When None, it defaults to `mkdocs.yml` in the CWD. If a closed file descriptor
152 is received, a new file descriptor is opened for the same file.
153
154 The file descriptor is automatically closed when the context manager block is exited.
155 """
156
157 # Default to the standard config filename.
158 if config_file is None:
159 config_file = os.path.abspath('mkdocs.yml')
160
161 # If closed file descriptor, get file path to reopen later.
162 if hasattr(config_file, 'closed') and config_file.closed:
163 config_file = config_file.name
164
165 log.debug(f"Loading configuration file: {config_file}")
166
167 # If it is a string, we can assume it is a path and attempt to open it.
168 if isinstance(config_file, str):
169 if os.path.exists(config_file):
170 config_file = open(config_file, 'rb')
171 else:
172 raise exceptions.ConfigurationError(
173 f"Config file '{config_file}' does not exist.")
174
175 # Ensure file descriptor is at begining
176 config_file.seek(0)
177 try:
178 yield config_file
179 finally:
180 if hasattr(config_file, 'close'):
181 config_file.close()
182
183
184 def load_config(config_file=None, **kwargs):
185 """
186 Load the configuration for a given file object or name
187
188 The config_file can either be a file object, string or None. If it is None
189 the default `mkdocs.yml` filename will be loaded.
190
191 Extra kwargs are passed to the configuration to replace any default values
192 unless they themselves are None.
193 """
194 options = kwargs.copy()
195
196 # Filter None values from the options. This usually happens with optional
197 # parameters from Click.
198 for key, value in options.copy().items():
199 if value is None:
200 options.pop(key)
201
202 with _open_config_file(config_file) as fd:
203 options['config_file_path'] = getattr(fd, 'name', '')
204
205 # Initialise the config with the default schema.
206 from mkdocs.config.defaults import get_schema
207 cfg = Config(schema=get_schema(), config_file_path=options['config_file_path'])
208 # load the config file
209 cfg.load_file(fd)
210
211 # Then load the options to overwrite anything in the config.
212 cfg.load_dict(options)
213
214 errors, warnings = cfg.validate()
215
216 for config_name, warning in warnings:
217 log.warning(f"Config value: '{config_name}'. Warning: {warning}")
218
219 for config_name, error in errors:
220 log.error(f"Config value: '{config_name}'. Error: {error}")
221
222 for key, value in cfg.items():
223 log.debug(f"Config value: '{key}' = {value!r}")
224
225 if len(errors) > 0:
226 raise exceptions.Abort(
227 "Aborted with {} Configuration Errors!".format(len(errors))
228 )
229 elif cfg['strict'] and len(warnings) > 0:
230 raise exceptions.Abort(
231 "Aborted with {} Configuration Warnings in 'strict' mode!".format(len(warnings))
232 )
233
234 return cfg
235
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mkdocs/config/base.py b/mkdocs/config/base.py
--- a/mkdocs/config/base.py
+++ b/mkdocs/config/base.py
@@ -156,24 +156,34 @@
# Default to the standard config filename.
if config_file is None:
- config_file = os.path.abspath('mkdocs.yml')
-
- # If closed file descriptor, get file path to reopen later.
- if hasattr(config_file, 'closed') and config_file.closed:
- config_file = config_file.name
-
- log.debug(f"Loading configuration file: {config_file}")
-
+ paths_to_try = ['mkdocs.yml', 'mkdocs.yaml']
# If it is a string, we can assume it is a path and attempt to open it.
- if isinstance(config_file, str):
- if os.path.exists(config_file):
- config_file = open(config_file, 'rb')
+ elif isinstance(config_file, str):
+ paths_to_try = [config_file]
+ # If closed file descriptor, get file path to reopen later.
+ elif getattr(config_file, 'closed', False):
+ paths_to_try = [config_file.name]
+ else:
+ paths_to_try = None
+
+ if paths_to_try:
+ # config_file is not a file descriptor, so open it as a path.
+ for path in paths_to_try:
+ path = os.path.abspath(path)
+ log.debug(f"Loading configuration file: {path}")
+ try:
+ config_file = open(path, 'rb')
+ break
+ except FileNotFoundError:
+ continue
else:
raise exceptions.ConfigurationError(
- f"Config file '{config_file}' does not exist.")
+ f"Config file '{paths_to_try[0]}' does not exist.")
+ else:
+ log.debug(f"Loading configuration file: {config_file}")
+ # Ensure file descriptor is at begining
+ config_file.seek(0)
- # Ensure file descriptor is at begining
- config_file.seek(0)
try:
yield config_file
finally:
| {"golden_diff": "diff --git a/mkdocs/config/base.py b/mkdocs/config/base.py\n--- a/mkdocs/config/base.py\n+++ b/mkdocs/config/base.py\n@@ -156,24 +156,34 @@\n \n # Default to the standard config filename.\n if config_file is None:\n- config_file = os.path.abspath('mkdocs.yml')\n-\n- # If closed file descriptor, get file path to reopen later.\n- if hasattr(config_file, 'closed') and config_file.closed:\n- config_file = config_file.name\n-\n- log.debug(f\"Loading configuration file: {config_file}\")\n-\n+ paths_to_try = ['mkdocs.yml', 'mkdocs.yaml']\n # If it is a string, we can assume it is a path and attempt to open it.\n- if isinstance(config_file, str):\n- if os.path.exists(config_file):\n- config_file = open(config_file, 'rb')\n+ elif isinstance(config_file, str):\n+ paths_to_try = [config_file]\n+ # If closed file descriptor, get file path to reopen later.\n+ elif getattr(config_file, 'closed', False):\n+ paths_to_try = [config_file.name]\n+ else:\n+ paths_to_try = None\n+\n+ if paths_to_try:\n+ # config_file is not a file descriptor, so open it as a path.\n+ for path in paths_to_try:\n+ path = os.path.abspath(path)\n+ log.debug(f\"Loading configuration file: {path}\")\n+ try:\n+ config_file = open(path, 'rb')\n+ break\n+ except FileNotFoundError:\n+ continue\n else:\n raise exceptions.ConfigurationError(\n- f\"Config file '{config_file}' does not exist.\")\n+ f\"Config file '{paths_to_try[0]}' does not exist.\")\n+ else:\n+ log.debug(f\"Loading configuration file: {config_file}\")\n+ # Ensure file descriptor is at begining\n+ config_file.seek(0)\n \n- # Ensure file descriptor is at begining\n- config_file.seek(0)\n try:\n yield config_file\n finally:\n", "issue": "Allow mkdocs.yaml config file (.yaml extension)\nEven though `.yml` is commonly used, `.yaml` is actually preferred:\r\n\r\nhttps://yaml.org/faq.html\r\n\r\n> Is there an official extension for YAML files?\r\n>\r\n> Please use \".yaml\" when possible.\r\n\r\nIt would therefore be great if you would allow `mkdocs.yaml`.\r\n\r\nSomewhat related t #2164\r\n\nAllow mkdocs.yaml when '--config' is not passed\nWe wanted to give our colleagues to possibility to build docs automatically when they put a `mkdocs.yml` in the docs folder of their repository. But it happened often, that they used `.yaml` instead the `.yml`.\r\n\r\nWhen this PR is accepted, it will look first look for `mkdocs.yml` when `--config` is not provided. If this file is not present, it will try to find `mkdocs.yaml`.\r\nWhen both are not present, it behaves like before.\r\nWhen both are present, it behaves like before.\r\nIf only the `.yaml` version it present, this one will be used as the config file.\n", "before_files": [{"content": "import logging\nimport os\nimport sys\nfrom yaml import YAMLError\nfrom collections import UserDict\nfrom contextlib import contextmanager\n\nfrom mkdocs import exceptions\nfrom mkdocs import utils\n\n\nlog = logging.getLogger('mkdocs.config')\n\n\nclass ValidationError(Exception):\n \"\"\"Raised during the validation process of the config on errors.\"\"\"\n\n\nclass Config(UserDict):\n \"\"\"\n MkDocs Configuration dict\n\n This is a fairly simple extension of a standard dictionary. 
It adds methods\n for running validation on the structure and contents.\n \"\"\"\n\n def __init__(self, schema, config_file_path=None):\n \"\"\"\n The schema is a Python dict which maps the config name to a validator.\n \"\"\"\n\n self._schema = schema\n self._schema_keys = set(dict(schema).keys())\n # Ensure config_file_path is a Unicode string\n if config_file_path is not None and not isinstance(config_file_path, str):\n try:\n # Assume config_file_path is encoded with the file system encoding.\n config_file_path = config_file_path.decode(encoding=sys.getfilesystemencoding())\n except UnicodeDecodeError:\n raise ValidationError(\"config_file_path is not a Unicode string.\")\n self.config_file_path = config_file_path\n self.data = {}\n\n self.user_configs = []\n self.set_defaults()\n\n def set_defaults(self):\n \"\"\"\n Set the base config by going through each validator and getting the\n default if it has one.\n \"\"\"\n\n for key, config_option in self._schema:\n self[key] = config_option.default\n\n def _validate(self):\n\n failed, warnings = [], []\n\n for key, config_option in self._schema:\n try:\n value = self.get(key)\n self[key] = config_option.validate(value)\n warnings.extend([(key, w) for w in config_option.warnings])\n config_option.reset_warnings()\n except ValidationError as e:\n failed.append((key, e))\n\n for key in (set(self.keys()) - self._schema_keys):\n warnings.append((\n key, f\"Unrecognised configuration name: {key}\"\n ))\n\n return failed, warnings\n\n def _pre_validate(self):\n\n failed, warnings = [], []\n\n for key, config_option in self._schema:\n try:\n config_option.pre_validation(self, key_name=key)\n warnings.extend([(key, w) for w in config_option.warnings])\n config_option.reset_warnings()\n except ValidationError as e:\n failed.append((key, e))\n\n return failed, warnings\n\n def _post_validate(self):\n\n failed, warnings = [], []\n\n for key, config_option in self._schema:\n try:\n config_option.post_validation(self, key_name=key)\n warnings.extend([(key, w) for w in config_option.warnings])\n config_option.reset_warnings()\n except ValidationError as e:\n failed.append((key, e))\n\n return failed, warnings\n\n def validate(self):\n\n failed, warnings = self._pre_validate()\n\n run_failed, run_warnings = self._validate()\n\n failed.extend(run_failed)\n warnings.extend(run_warnings)\n\n # Only run the post validation steps if there are no failures, warnings\n # are okay.\n if len(failed) == 0:\n post_failed, post_warnings = self._post_validate()\n failed.extend(post_failed)\n warnings.extend(post_warnings)\n\n return failed, warnings\n\n def load_dict(self, patch):\n \"\"\" Load config options from a dictionary. \"\"\"\n\n if not isinstance(patch, dict):\n raise exceptions.ConfigurationError(\n \"The configuration is invalid. The expected type was a key \"\n \"value mapping (a python dict) but we got an object of type: \"\n \"{}\".format(type(patch)))\n\n self.user_configs.append(patch)\n self.data.update(patch)\n\n def load_file(self, config_file):\n \"\"\" Load config options from the open file descriptor of a YAML file. 
\"\"\"\n try:\n return self.load_dict(utils.yaml_load(config_file))\n except YAMLError as e:\n # MkDocs knows and understands ConfigurationErrors\n raise exceptions.ConfigurationError(\n f\"MkDocs encountered an error parsing the configuration file: {e}\"\n )\n\n\n@contextmanager\ndef _open_config_file(config_file):\n \"\"\"\n A context manager which yields an open file descriptor ready to be read.\n\n Accepts a filename as a string, an open or closed file descriptor, or None.\n When None, it defaults to `mkdocs.yml` in the CWD. If a closed file descriptor\n is received, a new file descriptor is opened for the same file.\n\n The file descriptor is automaticaly closed when the context manager block is existed.\n \"\"\"\n\n # Default to the standard config filename.\n if config_file is None:\n config_file = os.path.abspath('mkdocs.yml')\n\n # If closed file descriptor, get file path to reopen later.\n if hasattr(config_file, 'closed') and config_file.closed:\n config_file = config_file.name\n\n log.debug(f\"Loading configuration file: {config_file}\")\n\n # If it is a string, we can assume it is a path and attempt to open it.\n if isinstance(config_file, str):\n if os.path.exists(config_file):\n config_file = open(config_file, 'rb')\n else:\n raise exceptions.ConfigurationError(\n f\"Config file '{config_file}' does not exist.\")\n\n # Ensure file descriptor is at begining\n config_file.seek(0)\n try:\n yield config_file\n finally:\n if hasattr(config_file, 'close'):\n config_file.close()\n\n\ndef load_config(config_file=None, **kwargs):\n \"\"\"\n Load the configuration for a given file object or name\n\n The config_file can either be a file object, string or None. If it is None\n the default `mkdocs.yml` filename will loaded.\n\n Extra kwargs are passed to the configuration to replace any default values\n unless they themselves are None.\n \"\"\"\n options = kwargs.copy()\n\n # Filter None values from the options. This usually happens with optional\n # parameters from Click.\n for key, value in options.copy().items():\n if value is None:\n options.pop(key)\n\n with _open_config_file(config_file) as fd:\n options['config_file_path'] = getattr(fd, 'name', '')\n\n # Initialise the config with the default schema.\n from mkdocs.config.defaults import get_schema\n cfg = Config(schema=get_schema(), config_file_path=options['config_file_path'])\n # load the config file\n cfg.load_file(fd)\n\n # Then load the options to overwrite anything in the config.\n cfg.load_dict(options)\n\n errors, warnings = cfg.validate()\n\n for config_name, warning in warnings:\n log.warning(f\"Config value: '{config_name}'. Warning: {warning}\")\n\n for config_name, error in errors:\n log.error(f\"Config value: '{config_name}'. 
Error: {error}\")\n\n for key, value in cfg.items():\n log.debug(f\"Config value: '{key}' = {value!r}\")\n\n if len(errors) > 0:\n raise exceptions.Abort(\n \"Aborted with {} Configuration Errors!\".format(len(errors))\n )\n elif cfg['strict'] and len(warnings) > 0:\n raise exceptions.Abort(\n \"Aborted with {} Configuration Warnings in 'strict' mode!\".format(len(warnings))\n )\n\n return cfg\n", "path": "mkdocs/config/base.py"}], "after_files": [{"content": "import logging\nimport os\nimport sys\nfrom yaml import YAMLError\nfrom collections import UserDict\nfrom contextlib import contextmanager\n\nfrom mkdocs import exceptions\nfrom mkdocs import utils\n\n\nlog = logging.getLogger('mkdocs.config')\n\n\nclass ValidationError(Exception):\n \"\"\"Raised during the validation process of the config on errors.\"\"\"\n\n\nclass Config(UserDict):\n \"\"\"\n MkDocs Configuration dict\n\n This is a fairly simple extension of a standard dictionary. It adds methods\n for running validation on the structure and contents.\n \"\"\"\n\n def __init__(self, schema, config_file_path=None):\n \"\"\"\n The schema is a Python dict which maps the config name to a validator.\n \"\"\"\n\n self._schema = schema\n self._schema_keys = set(dict(schema).keys())\n # Ensure config_file_path is a Unicode string\n if config_file_path is not None and not isinstance(config_file_path, str):\n try:\n # Assume config_file_path is encoded with the file system encoding.\n config_file_path = config_file_path.decode(encoding=sys.getfilesystemencoding())\n except UnicodeDecodeError:\n raise ValidationError(\"config_file_path is not a Unicode string.\")\n self.config_file_path = config_file_path\n self.data = {}\n\n self.user_configs = []\n self.set_defaults()\n\n def set_defaults(self):\n \"\"\"\n Set the base config by going through each validator and getting the\n default if it has one.\n \"\"\"\n\n for key, config_option in self._schema:\n self[key] = config_option.default\n\n def _validate(self):\n\n failed, warnings = [], []\n\n for key, config_option in self._schema:\n try:\n value = self.get(key)\n self[key] = config_option.validate(value)\n warnings.extend([(key, w) for w in config_option.warnings])\n config_option.reset_warnings()\n except ValidationError as e:\n failed.append((key, e))\n\n for key in (set(self.keys()) - self._schema_keys):\n warnings.append((\n key, f\"Unrecognised configuration name: {key}\"\n ))\n\n return failed, warnings\n\n def _pre_validate(self):\n\n failed, warnings = [], []\n\n for key, config_option in self._schema:\n try:\n config_option.pre_validation(self, key_name=key)\n warnings.extend([(key, w) for w in config_option.warnings])\n config_option.reset_warnings()\n except ValidationError as e:\n failed.append((key, e))\n\n return failed, warnings\n\n def _post_validate(self):\n\n failed, warnings = [], []\n\n for key, config_option in self._schema:\n try:\n config_option.post_validation(self, key_name=key)\n warnings.extend([(key, w) for w in config_option.warnings])\n config_option.reset_warnings()\n except ValidationError as e:\n failed.append((key, e))\n\n return failed, warnings\n\n def validate(self):\n\n failed, warnings = self._pre_validate()\n\n run_failed, run_warnings = self._validate()\n\n failed.extend(run_failed)\n warnings.extend(run_warnings)\n\n # Only run the post validation steps if there are no failures, warnings\n # are okay.\n if len(failed) == 0:\n post_failed, post_warnings = self._post_validate()\n failed.extend(post_failed)\n warnings.extend(post_warnings)\n\n return 
failed, warnings\n\n def load_dict(self, patch):\n \"\"\" Load config options from a dictionary. \"\"\"\n\n if not isinstance(patch, dict):\n raise exceptions.ConfigurationError(\n \"The configuration is invalid. The expected type was a key \"\n \"value mapping (a python dict) but we got an object of type: \"\n \"{}\".format(type(patch)))\n\n self.user_configs.append(patch)\n self.data.update(patch)\n\n def load_file(self, config_file):\n \"\"\" Load config options from the open file descriptor of a YAML file. \"\"\"\n try:\n return self.load_dict(utils.yaml_load(config_file))\n except YAMLError as e:\n # MkDocs knows and understands ConfigurationErrors\n raise exceptions.ConfigurationError(\n f\"MkDocs encountered an error parsing the configuration file: {e}\"\n )\n\n\n@contextmanager\ndef _open_config_file(config_file):\n \"\"\"\n A context manager which yields an open file descriptor ready to be read.\n\n Accepts a filename as a string, an open or closed file descriptor, or None.\n When None, it defaults to `mkdocs.yml` in the CWD. If a closed file descriptor\n is received, a new file descriptor is opened for the same file.\n\n The file descriptor is automaticaly closed when the context manager block is existed.\n \"\"\"\n\n # Default to the standard config filename.\n if config_file is None:\n paths_to_try = ['mkdocs.yml', 'mkdocs.yaml']\n # If it is a string, we can assume it is a path and attempt to open it.\n elif isinstance(config_file, str):\n paths_to_try = [config_file]\n # If closed file descriptor, get file path to reopen later.\n elif getattr(config_file, 'closed', False):\n paths_to_try = [config_file.name]\n else:\n paths_to_try = None\n\n if paths_to_try:\n # config_file is not a file descriptor, so open it as a path.\n for path in paths_to_try:\n path = os.path.abspath(path)\n log.debug(f\"Loading configuration file: {path}\")\n try:\n config_file = open(path, 'rb')\n break\n except FileNotFoundError:\n continue\n else:\n raise exceptions.ConfigurationError(\n f\"Config file '{paths_to_try[0]}' does not exist.\")\n else:\n log.debug(f\"Loading configuration file: {config_file}\")\n # Ensure file descriptor is at begining\n config_file.seek(0)\n\n try:\n yield config_file\n finally:\n if hasattr(config_file, 'close'):\n config_file.close()\n\n\ndef load_config(config_file=None, **kwargs):\n \"\"\"\n Load the configuration for a given file object or name\n\n The config_file can either be a file object, string or None. If it is None\n the default `mkdocs.yml` filename will loaded.\n\n Extra kwargs are passed to the configuration to replace any default values\n unless they themselves are None.\n \"\"\"\n options = kwargs.copy()\n\n # Filter None values from the options. This usually happens with optional\n # parameters from Click.\n for key, value in options.copy().items():\n if value is None:\n options.pop(key)\n\n with _open_config_file(config_file) as fd:\n options['config_file_path'] = getattr(fd, 'name', '')\n\n # Initialise the config with the default schema.\n from mkdocs.config.defaults import get_schema\n cfg = Config(schema=get_schema(), config_file_path=options['config_file_path'])\n # load the config file\n cfg.load_file(fd)\n\n # Then load the options to overwrite anything in the config.\n cfg.load_dict(options)\n\n errors, warnings = cfg.validate()\n\n for config_name, warning in warnings:\n log.warning(f\"Config value: '{config_name}'. Warning: {warning}\")\n\n for config_name, error in errors:\n log.error(f\"Config value: '{config_name}'. 
Error: {error}\")\n\n for key, value in cfg.items():\n log.debug(f\"Config value: '{key}' = {value!r}\")\n\n if len(errors) > 0:\n raise exceptions.Abort(\n \"Aborted with {} Configuration Errors!\".format(len(errors))\n )\n elif cfg['strict'] and len(warnings) > 0:\n raise exceptions.Abort(\n \"Aborted with {} Configuration Warnings in 'strict' mode!\".format(len(warnings))\n )\n\n return cfg\n", "path": "mkdocs/config/base.py"}]} | 2,683 | 468 |
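The mkdocs patch above falls back from `mkdocs.yml` to `mkdocs.yaml` with Python's `for`/`else`: the `else` branch runs only when the loop ends without a `break`, i.e. when none of the candidate files could be opened. A minimal standalone sketch of that idiom (the helper name and wrapper are illustrative, not part of the patch):

```python
import os

def open_first_existing(candidates=('mkdocs.yml', 'mkdocs.yaml')):
    # Try each candidate in order; break as soon as one can be opened.
    for name in candidates:
        path = os.path.abspath(name)
        try:
            fd = open(path, 'rb')
            break          # found a config file, skip the else clause
        except FileNotFoundError:
            continue
    else:                  # loop finished without break: nothing was found
        raise FileNotFoundError(f"Config file '{candidates[0]}' does not exist.")
    return fd
```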
gh_patches_debug_34616 | rasdani/github-patches | git_diff | biolab__orange3-4255 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PCA: retain all data (Unification of embedding methods )
**Issue:** PCA discards the original attributes and replaces them with PCs. Often (e.g. in scOrange), it is desirable to use the original features: annotate PCA plots, compute differentially-occurring features in different PCA regions, etc.
**Proposed solution**: Place PCs in meta attributes and rename output to *Data*.
This would be consistent with *t-SNE* and *MDS*. The user could use *Select columns* to restore the current behaviour. This is substantially easier (the number of PCs is typically small) than merging the original attributes with the principal components, as is possible in the current version. 
Such an improvement would also play nice with the new widget *Apply domain*, which enables projecting data to existing PCs and/or attributes.
--- END ISSUE ---
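A rough sketch of how the proposed output could be assembled with Orange's `Domain`/`Table` API (the helper name and its wiring into the widget are assumptions for illustration, not the actual change):

```python
import numpy as np
from Orange.data import Domain, Table

def data_with_pc_metas(data, transformed, n_components):
    # Keep the original attributes and class variables, and append the
    # first n_components principal components as meta attributes.
    pc_vars = transformed.domain.attributes[:n_components]
    out_domain = Domain(data.domain.attributes,
                        data.domain.class_vars,
                        data.domain.metas + pc_vars)
    metas = np.hstack((data.metas, transformed.X[:, :n_components]))
    return Table.from_numpy(out_domain, data.X, data.Y, metas)
```

With the PCs stored as metas, restoring the current behaviour is a single *Select columns* step that moves them back into the attribute part of the domain.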
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Orange/widgets/unsupervised/owpca.py`
Content:
```
1 import numbers
2
3 import numpy
4 from AnyQt.QtWidgets import QFormLayout
5 from AnyQt.QtCore import Qt
6
7 from Orange.data import Table, Domain, StringVariable, ContinuousVariable
8 from Orange.data.sql.table import SqlTable, AUTO_DL_LIMIT
9 from Orange.preprocess import preprocess
10 from Orange.projection import PCA
11 from Orange.widgets import widget, gui, settings
12 from Orange.widgets.utils.slidergraph import SliderGraph
13 from Orange.widgets.utils.widgetpreview import WidgetPreview
14 from Orange.widgets.widget import Input, Output
15
16
17 # Maximum number of PCA components that we can set in the widget
18 MAX_COMPONENTS = 100
19 LINE_NAMES = ["component variance", "cumulative variance"]
20
21
22 class OWPCA(widget.OWWidget):
23 name = "PCA"
24 description = "Principal component analysis with a scree-diagram."
25 icon = "icons/PCA.svg"
26 priority = 3050
27 keywords = ["principal component analysis", "linear transformation"]
28
29 class Inputs:
30 data = Input("Data", Table)
31
32 class Outputs:
33 transformed_data = Output("Transformed Data", Table, replaces=["Transformed data"])
34 components = Output("Components", Table)
35 pca = Output("PCA", PCA, dynamic=False)
36
37 settingsHandler = settings.DomainContextHandler()
38
39 ncomponents = settings.Setting(2)
40 variance_covered = settings.Setting(100)
41 auto_commit = settings.Setting(True)
42 normalize = settings.ContextSetting(True)
43 maxp = settings.Setting(20)
44 axis_labels = settings.Setting(10)
45
46 graph_name = "plot.plotItem"
47
48 class Warning(widget.OWWidget.Warning):
49 trivial_components = widget.Msg(
50 "All components of the PCA are trivial (explain 0 variance). "
51 "Input data is constant (or near constant).")
52
53 class Error(widget.OWWidget.Error):
54 no_features = widget.Msg("At least 1 feature is required")
55 no_instances = widget.Msg("At least 1 data instance is required")
56
57 def __init__(self):
58 super().__init__()
59 self.data = None
60
61 self._pca = None
62 self._transformed = None
63 self._variance_ratio = None
64 self._cumulative = None
65 self._init_projector()
66
67 # Components Selection
68 box = gui.vBox(self.controlArea, "Components Selection")
69 form = QFormLayout()
70 box.layout().addLayout(form)
71
72 self.components_spin = gui.spin(
73 box, self, "ncomponents", 1, MAX_COMPONENTS,
74 callback=self._update_selection_component_spin,
75 keyboardTracking=False
76 )
77 self.components_spin.setSpecialValueText("All")
78
79 self.variance_spin = gui.spin(
80 box, self, "variance_covered", 1, 100,
81 callback=self._update_selection_variance_spin,
82 keyboardTracking=False
83 )
84 self.variance_spin.setSuffix("%")
85
86 form.addRow("Components:", self.components_spin)
87 form.addRow("Explained variance:", self.variance_spin)
88
89 # Options
90 self.options_box = gui.vBox(self.controlArea, "Options")
91 self.normalize_box = gui.checkBox(
92 self.options_box, self, "normalize",
93 "Normalize variables", callback=self._update_normalize
94 )
95
96 self.maxp_spin = gui.spin(
97 self.options_box, self, "maxp", 1, MAX_COMPONENTS,
98 label="Show only first", callback=self._setup_plot,
99 keyboardTracking=False
100 )
101
102 self.controlArea.layout().addStretch()
103
104 gui.auto_apply(self.controlArea, self, "auto_commit")
105
106 self.plot = SliderGraph(
107 "Principal Components", "Proportion of variance",
108 self._on_cut_changed)
109
110 self.mainArea.layout().addWidget(self.plot)
111 self._update_normalize()
112
113 @Inputs.data
114 def set_data(self, data):
115 self.closeContext()
116 self.clear_messages()
117 self.clear()
118 self.information()
119 self.data = None
120 if isinstance(data, SqlTable):
121 if data.approx_len() < AUTO_DL_LIMIT:
122 data = Table(data)
123 else:
124 self.information("Data has been sampled")
125 data_sample = data.sample_time(1, no_cache=True)
126 data_sample.download_data(2000, partial=True)
127 data = Table(data_sample)
128 if isinstance(data, Table):
129 if not data.domain.attributes:
130 self.Error.no_features()
131 self.clear_outputs()
132 return
133 if not data:
134 self.Error.no_instances()
135 self.clear_outputs()
136 return
137
138 self.openContext(data)
139 self._init_projector()
140
141 self.data = data
142 self.fit()
143
144 def fit(self):
145 self.clear()
146 self.Warning.trivial_components.clear()
147 if self.data is None:
148 return
149
150 data = self.data
151
152 if self.normalize:
153 self._pca_projector.preprocessors = \
154 self._pca_preprocessors + [preprocess.Normalize(center=False)]
155 else:
156 self._pca_projector.preprocessors = self._pca_preprocessors
157
158 if not isinstance(data, SqlTable):
159 pca = self._pca_projector(data)
160 variance_ratio = pca.explained_variance_ratio_
161 cumulative = numpy.cumsum(variance_ratio)
162
163 if numpy.isfinite(cumulative[-1]):
164 self.components_spin.setRange(0, len(cumulative))
165 self._pca = pca
166 self._variance_ratio = variance_ratio
167 self._cumulative = cumulative
168 self._setup_plot()
169 else:
170 self.Warning.trivial_components()
171
172 self.unconditional_commit()
173
174 def clear(self):
175 self._pca = None
176 self._transformed = None
177 self._variance_ratio = None
178 self._cumulative = None
179 self.plot.clear_plot()
180
181 def clear_outputs(self):
182 self.Outputs.transformed_data.send(None)
183 self.Outputs.components.send(None)
184 self.Outputs.pca.send(self._pca_projector)
185
186 def _setup_plot(self):
187 if self._pca is None:
188 self.plot.clear_plot()
189 return
190
191 explained_ratio = self._variance_ratio
192 explained = self._cumulative
193 cutpos = self._nselected_components()
194 p = min(len(self._variance_ratio), self.maxp)
195
196 self.plot.update(
197 numpy.arange(1, p+1), [explained_ratio[:p], explained[:p]],
198 [Qt.red, Qt.darkYellow], cutpoint_x=cutpos, names=LINE_NAMES)
199
200 self._update_axis()
201
202 def _on_cut_changed(self, components):
203 if components == self.ncomponents \
204 or self.ncomponents == 0 \
205 or self._pca is not None \
206 and components == len(self._variance_ratio):
207 return
208
209 self.ncomponents = components
210 if self._pca is not None:
211 var = self._cumulative[components - 1]
212 if numpy.isfinite(var):
213 self.variance_covered = int(var * 100)
214
215 self._invalidate_selection()
216
217 def _update_selection_component_spin(self):
218 # cut changed by "ncomponents" spin.
219 if self._pca is None:
220 self._invalidate_selection()
221 return
222
223 if self.ncomponents == 0:
224 # Special "All" value
225 cut = len(self._variance_ratio)
226 else:
227 cut = self.ncomponents
228
229 var = self._cumulative[cut - 1]
230 if numpy.isfinite(var):
231 self.variance_covered = int(var * 100)
232
233 self.plot.set_cut_point(cut)
234 self._invalidate_selection()
235
236 def _update_selection_variance_spin(self):
237 # cut changed by "max variance" spin.
238 if self._pca is None:
239 return
240
241 cut = numpy.searchsorted(self._cumulative,
242 self.variance_covered / 100.0) + 1
243 cut = min(cut, len(self._cumulative))
244 self.ncomponents = cut
245 self.plot.set_cut_point(cut)
246 self._invalidate_selection()
247
248 def _update_normalize(self):
249 self.fit()
250 if self.data is None:
251 self._invalidate_selection()
252
253 def _init_projector(self):
254 self._pca_projector = PCA(n_components=MAX_COMPONENTS, random_state=0)
255 self._pca_projector.component = self.ncomponents
256 self._pca_preprocessors = PCA.preprocessors
257
258 def _nselected_components(self):
259 """Return the number of selected components."""
260 if self._pca is None:
261 return 0
262
263 if self.ncomponents == 0:
264 # Special "All" value
265 max_comp = len(self._variance_ratio)
266 else:
267 max_comp = self.ncomponents
268
269 var_max = self._cumulative[max_comp - 1]
270 if var_max != numpy.floor(self.variance_covered / 100.0):
271 cut = max_comp
272 assert numpy.isfinite(var_max)
273 self.variance_covered = int(var_max * 100)
274 else:
275 self.ncomponents = cut = numpy.searchsorted(
276 self._cumulative, self.variance_covered / 100.0) + 1
277 return cut
278
279 def _invalidate_selection(self):
280 self.commit()
281
282 def _update_axis(self):
283 p = min(len(self._variance_ratio), self.maxp)
284 axis = self.plot.getAxis("bottom")
285 d = max((p-1)//(self.axis_labels-1), 1)
286 axis.setTicks([[(i, str(i)) for i in range(1, p + 1, d)]])
287
288 def commit(self):
289 transformed = components = None
290 if self._pca is not None:
291 if self._transformed is None:
292 # Compute the full transform (MAX_COMPONENTS components) once.
293 self._transformed = self._pca(self.data)
294 transformed = self._transformed
295
296 domain = Domain(
297 transformed.domain.attributes[:self.ncomponents],
298 self.data.domain.class_vars,
299 self.data.domain.metas
300 )
301 transformed = transformed.from_table(domain, transformed)
302 # prevent caching new features by defining compute_value
303 dom = Domain(
304 [ContinuousVariable(a.name, compute_value=lambda _: None)
305 for a in self._pca.orig_domain.attributes],
306 metas=[StringVariable(name='component')])
307 metas = numpy.array([['PC{}'.format(i + 1)
308 for i in range(self.ncomponents)]],
309 dtype=object).T
310 components = Table(dom, self._pca.components_[:self.ncomponents],
311 metas=metas)
312 components.name = 'components'
313
314 self._pca_projector.component = self.ncomponents
315 self.Outputs.transformed_data.send(transformed)
316 self.Outputs.components.send(components)
317 self.Outputs.pca.send(self._pca_projector)
318
319 def send_report(self):
320 if self.data is None:
321 return
322 self.report_items((
323 ("Normalize data", str(self.normalize)),
324 ("Selected components", self.ncomponents),
325 ("Explained variance", "{:.3f} %".format(self.variance_covered))
326 ))
327 self.report_plot()
328
329 @classmethod
330 def migrate_settings(cls, settings, version):
331 if "variance_covered" in settings:
332 # Due to the error in gh-1896 the variance_covered was persisted
333 # as a NaN value, causing a TypeError in the widgets `__init__`.
334 vc = settings["variance_covered"]
335 if isinstance(vc, numbers.Real):
336 if numpy.isfinite(vc):
337 vc = int(vc)
338 else:
339 vc = 100
340 settings["variance_covered"] = vc
341 if settings.get("ncomponents", 0) > MAX_COMPONENTS:
342 settings["ncomponents"] = MAX_COMPONENTS
343
344 # Remove old `decomposition_idx` when SVD was still included
345 settings.pop("decomposition_idx", None)
346
347 # Remove RemotePCA settings
348 settings.pop("batch_size", None)
349 settings.pop("address", None)
350 settings.pop("auto_update", None)
351
352
353 if __name__ == "__main__": # pragma: no cover
354 WidgetPreview(OWPCA).run(Table("housing"))
355
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Orange/widgets/unsupervised/owpca.py b/Orange/widgets/unsupervised/owpca.py
--- a/Orange/widgets/unsupervised/owpca.py
+++ b/Orange/widgets/unsupervised/owpca.py
@@ -31,6 +31,7 @@
class Outputs:
transformed_data = Output("Transformed Data", Table, replaces=["Transformed data"])
+ data = Output("Data", Table, default=True)
components = Output("Components", Table)
pca = Output("PCA", PCA, dynamic=False)
@@ -180,6 +181,7 @@
def clear_outputs(self):
self.Outputs.transformed_data.send(None)
+ self.Outputs.data.send(None)
self.Outputs.components.send(None)
self.Outputs.pca.send(self._pca_projector)
@@ -286,7 +288,7 @@
axis.setTicks([[(i, str(i)) for i in range(1, p + 1, d)]])
def commit(self):
- transformed = components = None
+ transformed = data = components = None
if self._pca is not None:
if self._transformed is None:
# Compute the full transform (MAX_COMPONENTS components) once.
@@ -311,9 +313,18 @@
metas=metas)
components.name = 'components'
+ data_dom = Domain(
+ self.data.domain.attributes,
+ self.data.domain.class_vars,
+ self.data.domain.metas + domain.attributes)
+ data = Table.from_numpy(
+ data_dom, self.data.X, self.data.Y,
+ numpy.hstack((self.data.metas, transformed.X)))
+
self._pca_projector.component = self.ncomponents
self.Outputs.transformed_data.send(transformed)
self.Outputs.components.send(components)
+ self.Outputs.data.send(data)
self.Outputs.pca.send(self._pca_projector)
def send_report(self):
| {"golden_diff": "diff --git a/Orange/widgets/unsupervised/owpca.py b/Orange/widgets/unsupervised/owpca.py\n--- a/Orange/widgets/unsupervised/owpca.py\n+++ b/Orange/widgets/unsupervised/owpca.py\n@@ -31,6 +31,7 @@\n \n class Outputs:\n transformed_data = Output(\"Transformed Data\", Table, replaces=[\"Transformed data\"])\n+ data = Output(\"Data\", Table, default=True)\n components = Output(\"Components\", Table)\n pca = Output(\"PCA\", PCA, dynamic=False)\n \n@@ -180,6 +181,7 @@\n \n def clear_outputs(self):\n self.Outputs.transformed_data.send(None)\n+ self.Outputs.data.send(None)\n self.Outputs.components.send(None)\n self.Outputs.pca.send(self._pca_projector)\n \n@@ -286,7 +288,7 @@\n axis.setTicks([[(i, str(i)) for i in range(1, p + 1, d)]])\n \n def commit(self):\n- transformed = components = None\n+ transformed = data = components = None\n if self._pca is not None:\n if self._transformed is None:\n # Compute the full transform (MAX_COMPONENTS components) once.\n@@ -311,9 +313,18 @@\n metas=metas)\n components.name = 'components'\n \n+ data_dom = Domain(\n+ self.data.domain.attributes,\n+ self.data.domain.class_vars,\n+ self.data.domain.metas + domain.attributes)\n+ data = Table.from_numpy(\n+ data_dom, self.data.X, self.data.Y,\n+ numpy.hstack((self.data.metas, transformed.X)))\n+\n self._pca_projector.component = self.ncomponents\n self.Outputs.transformed_data.send(transformed)\n self.Outputs.components.send(components)\n+ self.Outputs.data.send(data)\n self.Outputs.pca.send(self._pca_projector)\n \n def send_report(self):\n", "issue": "PCA: retain all data (Unification of embedding methods )\n**Issue:** PCA discards the original attributes and replaces them with PCs. Often (e.g. in scOrange), it is desirable to use the original features: annotate PCA plots, compute differentially-occurring features in different PCA regions, etc. \r\n\r\n**Proposed solution**: Place PCs in meta attributes and rename output to *Data*. \r\n\r\n\r\nThis would be consistent to *t-SNE* and *MDS*. The user could use *Select columns* to restore current behaviour. This is substantially easier (number of PCs is typically small) than merging original attributes with principal components, as it's possible in the current version. \r\n \r\nSuch an improvement would also play nice with the new widget *Apply domain*, which enables projecting data to existing PCs and/or attributes. 
\r\n\n", "before_files": [{"content": "import numbers\n\nimport numpy\nfrom AnyQt.QtWidgets import QFormLayout\nfrom AnyQt.QtCore import Qt\n\nfrom Orange.data import Table, Domain, StringVariable, ContinuousVariable\nfrom Orange.data.sql.table import SqlTable, AUTO_DL_LIMIT\nfrom Orange.preprocess import preprocess\nfrom Orange.projection import PCA\nfrom Orange.widgets import widget, gui, settings\nfrom Orange.widgets.utils.slidergraph import SliderGraph\nfrom Orange.widgets.utils.widgetpreview import WidgetPreview\nfrom Orange.widgets.widget import Input, Output\n\n\n# Maximum number of PCA components that we can set in the widget\nMAX_COMPONENTS = 100\nLINE_NAMES = [\"component variance\", \"cumulative variance\"]\n\n\nclass OWPCA(widget.OWWidget):\n name = \"PCA\"\n description = \"Principal component analysis with a scree-diagram.\"\n icon = \"icons/PCA.svg\"\n priority = 3050\n keywords = [\"principal component analysis\", \"linear transformation\"]\n\n class Inputs:\n data = Input(\"Data\", Table)\n\n class Outputs:\n transformed_data = Output(\"Transformed Data\", Table, replaces=[\"Transformed data\"])\n components = Output(\"Components\", Table)\n pca = Output(\"PCA\", PCA, dynamic=False)\n\n settingsHandler = settings.DomainContextHandler()\n\n ncomponents = settings.Setting(2)\n variance_covered = settings.Setting(100)\n auto_commit = settings.Setting(True)\n normalize = settings.ContextSetting(True)\n maxp = settings.Setting(20)\n axis_labels = settings.Setting(10)\n\n graph_name = \"plot.plotItem\"\n\n class Warning(widget.OWWidget.Warning):\n trivial_components = widget.Msg(\n \"All components of the PCA are trivial (explain 0 variance). \"\n \"Input data is constant (or near constant).\")\n\n class Error(widget.OWWidget.Error):\n no_features = widget.Msg(\"At least 1 feature is required\")\n no_instances = widget.Msg(\"At least 1 data instance is required\")\n\n def __init__(self):\n super().__init__()\n self.data = None\n\n self._pca = None\n self._transformed = None\n self._variance_ratio = None\n self._cumulative = None\n self._init_projector()\n\n # Components Selection\n box = gui.vBox(self.controlArea, \"Components Selection\")\n form = QFormLayout()\n box.layout().addLayout(form)\n\n self.components_spin = gui.spin(\n box, self, \"ncomponents\", 1, MAX_COMPONENTS,\n callback=self._update_selection_component_spin,\n keyboardTracking=False\n )\n self.components_spin.setSpecialValueText(\"All\")\n\n self.variance_spin = gui.spin(\n box, self, \"variance_covered\", 1, 100,\n callback=self._update_selection_variance_spin,\n keyboardTracking=False\n )\n self.variance_spin.setSuffix(\"%\")\n\n form.addRow(\"Components:\", self.components_spin)\n form.addRow(\"Explained variance:\", self.variance_spin)\n\n # Options\n self.options_box = gui.vBox(self.controlArea, \"Options\")\n self.normalize_box = gui.checkBox(\n self.options_box, self, \"normalize\",\n \"Normalize variables\", callback=self._update_normalize\n )\n\n self.maxp_spin = gui.spin(\n self.options_box, self, \"maxp\", 1, MAX_COMPONENTS,\n label=\"Show only first\", callback=self._setup_plot,\n keyboardTracking=False\n )\n\n self.controlArea.layout().addStretch()\n\n gui.auto_apply(self.controlArea, self, \"auto_commit\")\n\n self.plot = SliderGraph(\n \"Principal Components\", \"Proportion of variance\",\n self._on_cut_changed)\n\n self.mainArea.layout().addWidget(self.plot)\n self._update_normalize()\n\n @Inputs.data\n def set_data(self, data):\n self.closeContext()\n self.clear_messages()\n self.clear()\n 
self.information()\n self.data = None\n if isinstance(data, SqlTable):\n if data.approx_len() < AUTO_DL_LIMIT:\n data = Table(data)\n else:\n self.information(\"Data has been sampled\")\n data_sample = data.sample_time(1, no_cache=True)\n data_sample.download_data(2000, partial=True)\n data = Table(data_sample)\n if isinstance(data, Table):\n if not data.domain.attributes:\n self.Error.no_features()\n self.clear_outputs()\n return\n if not data:\n self.Error.no_instances()\n self.clear_outputs()\n return\n\n self.openContext(data)\n self._init_projector()\n\n self.data = data\n self.fit()\n\n def fit(self):\n self.clear()\n self.Warning.trivial_components.clear()\n if self.data is None:\n return\n\n data = self.data\n\n if self.normalize:\n self._pca_projector.preprocessors = \\\n self._pca_preprocessors + [preprocess.Normalize(center=False)]\n else:\n self._pca_projector.preprocessors = self._pca_preprocessors\n\n if not isinstance(data, SqlTable):\n pca = self._pca_projector(data)\n variance_ratio = pca.explained_variance_ratio_\n cumulative = numpy.cumsum(variance_ratio)\n\n if numpy.isfinite(cumulative[-1]):\n self.components_spin.setRange(0, len(cumulative))\n self._pca = pca\n self._variance_ratio = variance_ratio\n self._cumulative = cumulative\n self._setup_plot()\n else:\n self.Warning.trivial_components()\n\n self.unconditional_commit()\n\n def clear(self):\n self._pca = None\n self._transformed = None\n self._variance_ratio = None\n self._cumulative = None\n self.plot.clear_plot()\n\n def clear_outputs(self):\n self.Outputs.transformed_data.send(None)\n self.Outputs.components.send(None)\n self.Outputs.pca.send(self._pca_projector)\n\n def _setup_plot(self):\n if self._pca is None:\n self.plot.clear_plot()\n return\n\n explained_ratio = self._variance_ratio\n explained = self._cumulative\n cutpos = self._nselected_components()\n p = min(len(self._variance_ratio), self.maxp)\n\n self.plot.update(\n numpy.arange(1, p+1), [explained_ratio[:p], explained[:p]],\n [Qt.red, Qt.darkYellow], cutpoint_x=cutpos, names=LINE_NAMES)\n\n self._update_axis()\n\n def _on_cut_changed(self, components):\n if components == self.ncomponents \\\n or self.ncomponents == 0 \\\n or self._pca is not None \\\n and components == len(self._variance_ratio):\n return\n\n self.ncomponents = components\n if self._pca is not None:\n var = self._cumulative[components - 1]\n if numpy.isfinite(var):\n self.variance_covered = int(var * 100)\n\n self._invalidate_selection()\n\n def _update_selection_component_spin(self):\n # cut changed by \"ncomponents\" spin.\n if self._pca is None:\n self._invalidate_selection()\n return\n\n if self.ncomponents == 0:\n # Special \"All\" value\n cut = len(self._variance_ratio)\n else:\n cut = self.ncomponents\n\n var = self._cumulative[cut - 1]\n if numpy.isfinite(var):\n self.variance_covered = int(var * 100)\n\n self.plot.set_cut_point(cut)\n self._invalidate_selection()\n\n def _update_selection_variance_spin(self):\n # cut changed by \"max variance\" spin.\n if self._pca is None:\n return\n\n cut = numpy.searchsorted(self._cumulative,\n self.variance_covered / 100.0) + 1\n cut = min(cut, len(self._cumulative))\n self.ncomponents = cut\n self.plot.set_cut_point(cut)\n self._invalidate_selection()\n\n def _update_normalize(self):\n self.fit()\n if self.data is None:\n self._invalidate_selection()\n\n def _init_projector(self):\n self._pca_projector = PCA(n_components=MAX_COMPONENTS, random_state=0)\n self._pca_projector.component = self.ncomponents\n self._pca_preprocessors = 
PCA.preprocessors\n\n def _nselected_components(self):\n \"\"\"Return the number of selected components.\"\"\"\n if self._pca is None:\n return 0\n\n if self.ncomponents == 0:\n # Special \"All\" value\n max_comp = len(self._variance_ratio)\n else:\n max_comp = self.ncomponents\n\n var_max = self._cumulative[max_comp - 1]\n if var_max != numpy.floor(self.variance_covered / 100.0):\n cut = max_comp\n assert numpy.isfinite(var_max)\n self.variance_covered = int(var_max * 100)\n else:\n self.ncomponents = cut = numpy.searchsorted(\n self._cumulative, self.variance_covered / 100.0) + 1\n return cut\n\n def _invalidate_selection(self):\n self.commit()\n\n def _update_axis(self):\n p = min(len(self._variance_ratio), self.maxp)\n axis = self.plot.getAxis(\"bottom\")\n d = max((p-1)//(self.axis_labels-1), 1)\n axis.setTicks([[(i, str(i)) for i in range(1, p + 1, d)]])\n\n def commit(self):\n transformed = components = None\n if self._pca is not None:\n if self._transformed is None:\n # Compute the full transform (MAX_COMPONENTS components) once.\n self._transformed = self._pca(self.data)\n transformed = self._transformed\n\n domain = Domain(\n transformed.domain.attributes[:self.ncomponents],\n self.data.domain.class_vars,\n self.data.domain.metas\n )\n transformed = transformed.from_table(domain, transformed)\n # prevent caching new features by defining compute_value\n dom = Domain(\n [ContinuousVariable(a.name, compute_value=lambda _: None)\n for a in self._pca.orig_domain.attributes],\n metas=[StringVariable(name='component')])\n metas = numpy.array([['PC{}'.format(i + 1)\n for i in range(self.ncomponents)]],\n dtype=object).T\n components = Table(dom, self._pca.components_[:self.ncomponents],\n metas=metas)\n components.name = 'components'\n\n self._pca_projector.component = self.ncomponents\n self.Outputs.transformed_data.send(transformed)\n self.Outputs.components.send(components)\n self.Outputs.pca.send(self._pca_projector)\n\n def send_report(self):\n if self.data is None:\n return\n self.report_items((\n (\"Normalize data\", str(self.normalize)),\n (\"Selected components\", self.ncomponents),\n (\"Explained variance\", \"{:.3f} %\".format(self.variance_covered))\n ))\n self.report_plot()\n\n @classmethod\n def migrate_settings(cls, settings, version):\n if \"variance_covered\" in settings:\n # Due to the error in gh-1896 the variance_covered was persisted\n # as a NaN value, causing a TypeError in the widgets `__init__`.\n vc = settings[\"variance_covered\"]\n if isinstance(vc, numbers.Real):\n if numpy.isfinite(vc):\n vc = int(vc)\n else:\n vc = 100\n settings[\"variance_covered\"] = vc\n if settings.get(\"ncomponents\", 0) > MAX_COMPONENTS:\n settings[\"ncomponents\"] = MAX_COMPONENTS\n\n # Remove old `decomposition_idx` when SVD was still included\n settings.pop(\"decomposition_idx\", None)\n\n # Remove RemotePCA settings\n settings.pop(\"batch_size\", None)\n settings.pop(\"address\", None)\n settings.pop(\"auto_update\", None)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n WidgetPreview(OWPCA).run(Table(\"housing\"))\n", "path": "Orange/widgets/unsupervised/owpca.py"}], "after_files": [{"content": "import numbers\n\nimport numpy\nfrom AnyQt.QtWidgets import QFormLayout\nfrom AnyQt.QtCore import Qt\n\nfrom Orange.data import Table, Domain, StringVariable, ContinuousVariable\nfrom Orange.data.sql.table import SqlTable, AUTO_DL_LIMIT\nfrom Orange.preprocess import preprocess\nfrom Orange.projection import PCA\nfrom Orange.widgets import widget, gui, settings\nfrom 
Orange.widgets.utils.slidergraph import SliderGraph\nfrom Orange.widgets.utils.widgetpreview import WidgetPreview\nfrom Orange.widgets.widget import Input, Output\n\n\n# Maximum number of PCA components that we can set in the widget\nMAX_COMPONENTS = 100\nLINE_NAMES = [\"component variance\", \"cumulative variance\"]\n\n\nclass OWPCA(widget.OWWidget):\n name = \"PCA\"\n description = \"Principal component analysis with a scree-diagram.\"\n icon = \"icons/PCA.svg\"\n priority = 3050\n keywords = [\"principal component analysis\", \"linear transformation\"]\n\n class Inputs:\n data = Input(\"Data\", Table)\n\n class Outputs:\n transformed_data = Output(\"Transformed Data\", Table, replaces=[\"Transformed data\"])\n data = Output(\"Data\", Table, default=True)\n components = Output(\"Components\", Table)\n pca = Output(\"PCA\", PCA, dynamic=False)\n\n settingsHandler = settings.DomainContextHandler()\n\n ncomponents = settings.Setting(2)\n variance_covered = settings.Setting(100)\n auto_commit = settings.Setting(True)\n normalize = settings.ContextSetting(True)\n maxp = settings.Setting(20)\n axis_labels = settings.Setting(10)\n\n graph_name = \"plot.plotItem\"\n\n class Warning(widget.OWWidget.Warning):\n trivial_components = widget.Msg(\n \"All components of the PCA are trivial (explain 0 variance). \"\n \"Input data is constant (or near constant).\")\n\n class Error(widget.OWWidget.Error):\n no_features = widget.Msg(\"At least 1 feature is required\")\n no_instances = widget.Msg(\"At least 1 data instance is required\")\n\n def __init__(self):\n super().__init__()\n self.data = None\n\n self._pca = None\n self._transformed = None\n self._variance_ratio = None\n self._cumulative = None\n self._init_projector()\n\n # Components Selection\n box = gui.vBox(self.controlArea, \"Components Selection\")\n form = QFormLayout()\n box.layout().addLayout(form)\n\n self.components_spin = gui.spin(\n box, self, \"ncomponents\", 1, MAX_COMPONENTS,\n callback=self._update_selection_component_spin,\n keyboardTracking=False\n )\n self.components_spin.setSpecialValueText(\"All\")\n\n self.variance_spin = gui.spin(\n box, self, \"variance_covered\", 1, 100,\n callback=self._update_selection_variance_spin,\n keyboardTracking=False\n )\n self.variance_spin.setSuffix(\"%\")\n\n form.addRow(\"Components:\", self.components_spin)\n form.addRow(\"Explained variance:\", self.variance_spin)\n\n # Options\n self.options_box = gui.vBox(self.controlArea, \"Options\")\n self.normalize_box = gui.checkBox(\n self.options_box, self, \"normalize\",\n \"Normalize variables\", callback=self._update_normalize\n )\n\n self.maxp_spin = gui.spin(\n self.options_box, self, \"maxp\", 1, MAX_COMPONENTS,\n label=\"Show only first\", callback=self._setup_plot,\n keyboardTracking=False\n )\n\n self.controlArea.layout().addStretch()\n\n gui.auto_apply(self.controlArea, self, \"auto_commit\")\n\n self.plot = SliderGraph(\n \"Principal Components\", \"Proportion of variance\",\n self._on_cut_changed)\n\n self.mainArea.layout().addWidget(self.plot)\n self._update_normalize()\n\n @Inputs.data\n def set_data(self, data):\n self.closeContext()\n self.clear_messages()\n self.clear()\n self.information()\n self.data = None\n if isinstance(data, SqlTable):\n if data.approx_len() < AUTO_DL_LIMIT:\n data = Table(data)\n else:\n self.information(\"Data has been sampled\")\n data_sample = data.sample_time(1, no_cache=True)\n data_sample.download_data(2000, partial=True)\n data = Table(data_sample)\n if isinstance(data, Table):\n if not 
data.domain.attributes:\n self.Error.no_features()\n self.clear_outputs()\n return\n if not data:\n self.Error.no_instances()\n self.clear_outputs()\n return\n\n self.openContext(data)\n self._init_projector()\n\n self.data = data\n self.fit()\n\n def fit(self):\n self.clear()\n self.Warning.trivial_components.clear()\n if self.data is None:\n return\n\n data = self.data\n\n if self.normalize:\n self._pca_projector.preprocessors = \\\n self._pca_preprocessors + [preprocess.Normalize(center=False)]\n else:\n self._pca_projector.preprocessors = self._pca_preprocessors\n\n if not isinstance(data, SqlTable):\n pca = self._pca_projector(data)\n variance_ratio = pca.explained_variance_ratio_\n cumulative = numpy.cumsum(variance_ratio)\n\n if numpy.isfinite(cumulative[-1]):\n self.components_spin.setRange(0, len(cumulative))\n self._pca = pca\n self._variance_ratio = variance_ratio\n self._cumulative = cumulative\n self._setup_plot()\n else:\n self.Warning.trivial_components()\n\n self.unconditional_commit()\n\n def clear(self):\n self._pca = None\n self._transformed = None\n self._variance_ratio = None\n self._cumulative = None\n self.plot.clear_plot()\n\n def clear_outputs(self):\n self.Outputs.transformed_data.send(None)\n self.Outputs.data.send(None)\n self.Outputs.components.send(None)\n self.Outputs.pca.send(self._pca_projector)\n\n def _setup_plot(self):\n if self._pca is None:\n self.plot.clear_plot()\n return\n\n explained_ratio = self._variance_ratio\n explained = self._cumulative\n cutpos = self._nselected_components()\n p = min(len(self._variance_ratio), self.maxp)\n\n self.plot.update(\n numpy.arange(1, p+1), [explained_ratio[:p], explained[:p]],\n [Qt.red, Qt.darkYellow], cutpoint_x=cutpos, names=LINE_NAMES)\n\n self._update_axis()\n\n def _on_cut_changed(self, components):\n if components == self.ncomponents \\\n or self.ncomponents == 0 \\\n or self._pca is not None \\\n and components == len(self._variance_ratio):\n return\n\n self.ncomponents = components\n if self._pca is not None:\n var = self._cumulative[components - 1]\n if numpy.isfinite(var):\n self.variance_covered = int(var * 100)\n\n self._invalidate_selection()\n\n def _update_selection_component_spin(self):\n # cut changed by \"ncomponents\" spin.\n if self._pca is None:\n self._invalidate_selection()\n return\n\n if self.ncomponents == 0:\n # Special \"All\" value\n cut = len(self._variance_ratio)\n else:\n cut = self.ncomponents\n\n var = self._cumulative[cut - 1]\n if numpy.isfinite(var):\n self.variance_covered = int(var * 100)\n\n self.plot.set_cut_point(cut)\n self._invalidate_selection()\n\n def _update_selection_variance_spin(self):\n # cut changed by \"max variance\" spin.\n if self._pca is None:\n return\n\n cut = numpy.searchsorted(self._cumulative,\n self.variance_covered / 100.0) + 1\n cut = min(cut, len(self._cumulative))\n self.ncomponents = cut\n self.plot.set_cut_point(cut)\n self._invalidate_selection()\n\n def _update_normalize(self):\n self.fit()\n if self.data is None:\n self._invalidate_selection()\n\n def _init_projector(self):\n self._pca_projector = PCA(n_components=MAX_COMPONENTS, random_state=0)\n self._pca_projector.component = self.ncomponents\n self._pca_preprocessors = PCA.preprocessors\n\n def _nselected_components(self):\n \"\"\"Return the number of selected components.\"\"\"\n if self._pca is None:\n return 0\n\n if self.ncomponents == 0:\n # Special \"All\" value\n max_comp = len(self._variance_ratio)\n else:\n max_comp = self.ncomponents\n\n var_max = self._cumulative[max_comp - 
1]\n if var_max != numpy.floor(self.variance_covered / 100.0):\n cut = max_comp\n assert numpy.isfinite(var_max)\n self.variance_covered = int(var_max * 100)\n else:\n self.ncomponents = cut = numpy.searchsorted(\n self._cumulative, self.variance_covered / 100.0) + 1\n return cut\n\n def _invalidate_selection(self):\n self.commit()\n\n def _update_axis(self):\n p = min(len(self._variance_ratio), self.maxp)\n axis = self.plot.getAxis(\"bottom\")\n d = max((p-1)//(self.axis_labels-1), 1)\n axis.setTicks([[(i, str(i)) for i in range(1, p + 1, d)]])\n\n def commit(self):\n transformed = data = components = None\n if self._pca is not None:\n if self._transformed is None:\n # Compute the full transform (MAX_COMPONENTS components) once.\n self._transformed = self._pca(self.data)\n transformed = self._transformed\n\n domain = Domain(\n transformed.domain.attributes[:self.ncomponents],\n self.data.domain.class_vars,\n self.data.domain.metas\n )\n transformed = transformed.from_table(domain, transformed)\n # prevent caching new features by defining compute_value\n dom = Domain(\n [ContinuousVariable(a.name, compute_value=lambda _: None)\n for a in self._pca.orig_domain.attributes],\n metas=[StringVariable(name='component')])\n metas = numpy.array([['PC{}'.format(i + 1)\n for i in range(self.ncomponents)]],\n dtype=object).T\n components = Table(dom, self._pca.components_[:self.ncomponents],\n metas=metas)\n components.name = 'components'\n\n data_dom = Domain(\n self.data.domain.attributes,\n self.data.domain.class_vars,\n self.data.domain.metas + domain.attributes)\n data = Table.from_numpy(\n data_dom, self.data.X, self.data.Y,\n numpy.hstack((self.data.metas, transformed.X)))\n\n self._pca_projector.component = self.ncomponents\n self.Outputs.transformed_data.send(transformed)\n self.Outputs.components.send(components)\n self.Outputs.data.send(data)\n self.Outputs.pca.send(self._pca_projector)\n\n def send_report(self):\n if self.data is None:\n return\n self.report_items((\n (\"Normalize data\", str(self.normalize)),\n (\"Selected components\", self.ncomponents),\n (\"Explained variance\", \"{:.3f} %\".format(self.variance_covered))\n ))\n self.report_plot()\n\n @classmethod\n def migrate_settings(cls, settings, version):\n if \"variance_covered\" in settings:\n # Due to the error in gh-1896 the variance_covered was persisted\n # as a NaN value, causing a TypeError in the widgets `__init__`.\n vc = settings[\"variance_covered\"]\n if isinstance(vc, numbers.Real):\n if numpy.isfinite(vc):\n vc = int(vc)\n else:\n vc = 100\n settings[\"variance_covered\"] = vc\n if settings.get(\"ncomponents\", 0) > MAX_COMPONENTS:\n settings[\"ncomponents\"] = MAX_COMPONENTS\n\n # Remove old `decomposition_idx` when SVD was still included\n settings.pop(\"decomposition_idx\", None)\n\n # Remove RemotePCA settings\n settings.pop(\"batch_size\", None)\n settings.pop(\"address\", None)\n settings.pop(\"auto_update\", None)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n WidgetPreview(OWPCA).run(Table(\"housing\"))\n", "path": "Orange/widgets/unsupervised/owpca.py"}]} | 4,070 | 450 |
gh_patches_debug_37261 | rasdani/github-patches | git_diff | cupy__cupy-781 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cupy.linalg.norm returns complex scalar for complex input
```python
>>> a = cupy.array([1j, 2, 3])
>>> a.dtype
dtype('complex128')
>>> cupy.linalg.norm(a).dtype
dtype('complex128')
```
It should be `float64` in this case.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cupy/linalg/norms.py`
Content:
```
1 import numpy
2 from numpy import linalg
3
4 import cupy
5 from cupy import cuda
6 from cupy.cuda import device
7 from cupy.linalg import decomposition
8 from cupy.linalg import util
9
10
11 if cuda.cusolver_enabled:
12 from cupy.cuda import cusolver
13
14
15 def norm(x, ord=None, axis=None, keepdims=False):
16 """Returns one of matrix norms specified by ``ord`` parameter.
17
18 Complex valued matrices and vectors are not supported.
19 See numpy.linalg.norm for more detail.
20
21 Args:
22 x (cupy.ndarray): Array to take norm. If ``axis`` is None,
23 ``x`` must be 1-D or 2-D.
24 ord (non-zero int, inf, -inf, 'fro'): Norm type.
25 axis (int, 2-tuple of ints, None): 1-D or 2-D norm is cumputed over
26 ``axis``.
27 keepdims (bool): If this is set ``True``, the axes which are normed
28 over are left.
29
30 Returns:
31 cupy.ndarray
32
33 """
34 if not issubclass(x.dtype.type, numpy.inexact):
35 x = x.astype(float)
36
37 # Immediately handle some default, simple, fast, and common cases.
38 if axis is None:
39 ndim = x.ndim
40 if (ord is None or (ndim == 1 and ord == 2) or
41 (ndim == 2 and ord in ('f', 'fro'))):
42 ret = cupy.sqrt(cupy.sum(x.ravel() ** 2))
43 if keepdims:
44 ret = ret.reshape((1,) * ndim)
45 return ret
46
47 # Normalize the `axis` argument to a tuple.
48 nd = x.ndim
49 if axis is None:
50 axis = tuple(range(nd))
51 elif not isinstance(axis, tuple):
52 try:
53 axis = int(axis)
54 except Exception:
55 raise TypeError(
56 "'axis' must be None, an integer or a tuple of integers")
57 axis = (axis,)
58
59 if len(axis) == 1:
60 if ord == numpy.Inf:
61 return abs(x).max(axis=axis, keepdims=keepdims)
62 elif ord == -numpy.Inf:
63 return abs(x).min(axis=axis, keepdims=keepdims)
64 elif ord == 0:
65 # Zero norm
66 # Convert to Python float in accordance with NumPy
67 return (x != 0).sum(axis=axis, keepdims=keepdims, dtype='d')
68 elif ord == 1:
69 # special case for speedup
70 return abs(x).sum(axis=axis, keepdims=keepdims)
71 elif ord is None or ord == 2:
72 # special case for speedup
73 s = x ** 2
74 return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))
75 else:
76 try:
77 float(ord)
78 except TypeError:
79 raise ValueError("Invalid norm order for vectors.")
80 absx = abs(x).astype('d')
81 absx **= ord
82 ret = absx.sum(axis=axis, keepdims=keepdims)
83 ret **= (1.0 / ord)
84 return ret
85 elif len(axis) == 2:
86 row_axis, col_axis = axis
87 if row_axis < 0:
88 row_axis += nd
89 if col_axis < 0:
90 col_axis += nd
91 if not (0 <= row_axis < nd and 0 <= col_axis < nd):
92 raise ValueError('Invalid axis %r for an array with shape %r' %
93 (axis, x.shape))
94 if row_axis == col_axis:
95 raise ValueError('Duplicate axes given.')
96 if ord == 1:
97 if col_axis > row_axis:
98 col_axis -= 1
99 ret = abs(x).sum(axis=row_axis).max(axis=col_axis)
100 elif ord == numpy.Inf:
101 if row_axis > col_axis:
102 row_axis -= 1
103 ret = abs(x).sum(axis=col_axis).max(axis=row_axis)
104 elif ord == -1:
105 if col_axis > row_axis:
106 col_axis -= 1
107 ret = abs(x).sum(axis=row_axis).min(axis=col_axis)
108 elif ord == -numpy.Inf:
109 if row_axis > col_axis:
110 row_axis -= 1
111 ret = abs(x).sum(axis=col_axis).min(axis=row_axis)
112 elif ord in [None, 'fro', 'f']:
113 ret = cupy.sqrt((x ** 2).sum(axis=axis))
114 else:
115 raise ValueError("Invalid norm order for matrices.")
116 if keepdims:
117 ret_shape = list(x.shape)
118 ret_shape[axis[0]] = 1
119 ret_shape[axis[1]] = 1
120 ret = ret.reshape(ret_shape)
121 return ret
122 else:
123 raise ValueError("Improper number of dimensions to norm.")
124
125
126 # TODO(okuta): Implement cond
127
128
129 def det(a):
130 """Retruns the deteminant of an array.
131
132 Args:
133 a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.
134
135 Returns:
136 cupy.ndarray: Determinant of ``a``. Its shape is ``a.shape[:-2]``.
137
138 .. seealso:: :func:`numpy.linalg.det`
139 """
140 sign, logdet = slogdet(a)
141 return sign * cupy.exp(logdet)
142
143
144 def matrix_rank(M, tol=None):
145 """Return matrix rank of array using SVD method
146
147 Args:
148 M (cupy.ndarray): Input array. Its `ndim` must be less than or equal to
149 2.
150 tol (None or float): Threshold of singular value of `M`.
151 When `tol` is `None`, and `eps` is the epsilon value for datatype
152 of `M`, then `tol` is set to `S.max() * max(M.shape) * eps`,
153 where `S` is the singular value of `M`.
154 It obeys :func:`numpy.linalg.matrix_rank`.
155
156 Returns:
157 cupy.ndarray: Rank of `M`.
158
159 .. seealso:: :func:`numpy.linalg.matrix_rank`
160 """
161 if M.ndim < 2:
162 return (M != 0).any().astype('l')
163 S = decomposition.svd(M, compute_uv=False)
164 if tol is None:
165 tol = (S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) *
166 numpy.finfo(S.dtype).eps)
167 return (S > tol).sum(axis=-1)
168
169
170 def slogdet(a):
171 """Returns sign and logarithm of the determinat of an array.
172
173 It calculates the natural logarithm of the deteminant of a given value.
174
175 Args:
176 a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.
177
178 Returns:
179 tuple of :class:`~cupy.ndarray`:
180 It returns a tuple ``(sign, logdet)``. ``sign`` represents each
181 sign of the deteminant as a real number ``0``, ``1`` or ``-1``.
182 'logdet' represents the natural logarithm of the absolute of the
183 deteminant.
184 If the deteninant is zero, ``sign`` will be ``0`` and ``logdet``
185 will be ``-inf``.
186 The shapes of both ``sign`` and ``logdet`` are equal to
187 ``a.shape[:-2]``.
188
189 .. seealso:: :func:`numpy.linalg.slogdet`
190 """
191 if not cuda.cusolver_enabled:
192 raise RuntimeError('Current cupy only supports cusolver in CUDA 8.0')
193
194 if a.ndim < 2:
195 msg = ('%d-dimensional array given. '
196 'Array must be at least two-dimensional' % a.ndim)
197 raise linalg.LinAlgError(msg)
198
199 dtype = numpy.find_common_type((a.dtype.char, 'f'), ())
200 shape = a.shape[:-2]
201 sign = cupy.empty(shape, dtype)
202 logdet = cupy.empty(shape, dtype)
203
204 a = a.astype(dtype)
205 for index in numpy.ndindex(*shape):
206 s, l = _slogdet_one(a[index])
207 sign[index] = s
208 logdet[index] = l
209 return sign, logdet
210
211
212 def _slogdet_one(a):
213 util._assert_rank2(a)
214 util._assert_nd_squareness(a)
215 dtype = a.dtype
216
217 handle = device.get_cusolver_handle()
218 m = len(a)
219 ipiv = cupy.empty(m, 'i')
220 info = cupy.empty((), 'i')
221
222 # Need to make a copy because getrf works inplace
223 a_copy = a.copy(order='F')
224
225 if dtype == 'f':
226 getrf_bufferSize = cusolver.sgetrf_bufferSize
227 getrf = cusolver.sgetrf
228 else:
229 getrf_bufferSize = cusolver.dgetrf_bufferSize
230 getrf = cusolver.dgetrf
231
232 buffersize = getrf_bufferSize(handle, m, m, a_copy.data.ptr, m)
233 workspace = cupy.empty(buffersize, dtype=dtype)
234 getrf(handle, m, m, a_copy.data.ptr, m, workspace.data.ptr,
235 ipiv.data.ptr, info.data.ptr)
236
237 if info[()] == 0:
238 diag = cupy.diag(a_copy)
239 # ipiv is 1-origin
240 non_zero = (cupy.count_nonzero(ipiv != cupy.arange(1, m + 1)) +
241 cupy.count_nonzero(diag < 0))
242 # Note: sign == -1 ** (non_zero % 2)
243 sign = (non_zero % 2) * -2 + 1
244 logdet = cupy.log(abs(diag)).sum()
245 else:
246 sign = cupy.array(0.0, dtype=dtype)
247 logdet = cupy.array(float('-inf'), dtype)
248
249 return sign, logdet
250
251
252 def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
253 """Returns the sum along the diagonals of an array.
254
255 It computes the sum along the diagonals at ``axis1`` and ``axis2``.
256
257 Args:
258 a (cupy.ndarray): Array to take trace.
259 offset (int): Index of diagonals. Zero indicates the main diagonal, a
260 positive value an upper diagonal, and a negative value a lower
261 diagonal.
262 axis1 (int): The first axis along which the trace is taken.
263 axis2 (int): The second axis along which the trace is taken.
264 dtype: Data type specifier of the output.
265 out (cupy.ndarray): Output array.
266
267 Returns:
268 cupy.ndarray: The trace of ``a`` along axes ``(axis1, axis2)``.
269
270 .. seealso:: :func:`numpy.trace`
271
272 """
273 # TODO(okuta): check type
274 return a.trace(offset, axis1, axis2, dtype, out)
275
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cupy/linalg/norms.py b/cupy/linalg/norms.py
--- a/cupy/linalg/norms.py
+++ b/cupy/linalg/norms.py
@@ -39,7 +39,12 @@
ndim = x.ndim
if (ord is None or (ndim == 1 and ord == 2) or
(ndim == 2 and ord in ('f', 'fro'))):
- ret = cupy.sqrt(cupy.sum(x.ravel() ** 2))
+ if issubclass(x.dtype.type, numpy.complexfloating):
+ s = abs(x.ravel())
+ s *= s
+ ret = cupy.sqrt(s.sum())
+ else:
+ ret = cupy.sqrt((x.ravel() ** 2).sum())
if keepdims:
ret = ret.reshape((1,) * ndim)
return ret
@@ -70,14 +75,27 @@
return abs(x).sum(axis=axis, keepdims=keepdims)
elif ord is None or ord == 2:
# special case for speedup
- s = x ** 2
+ if issubclass(x.dtype.type, numpy.complexfloating):
+ s = abs(x)
+ s *= s
+ else:
+ s = x ** 2
return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))
else:
try:
float(ord)
except TypeError:
raise ValueError("Invalid norm order for vectors.")
- absx = abs(x).astype('d')
+
+ # Mirror Numpy behavior of casting to double for non-complex
+ # dtypes, and to float32 or float64 for complex dtypes and
+ # no reduction over all axes.
+ cast_dtype = 'd'
+ if issubclass(x.dtype.type, numpy.complexfloating):
+ if keepdims or tuple(sorted(axis)) != tuple(range(nd)):
+ cast_dtype = x.dtype.char.lower() # 'D'->'d' and 'F'->'f'
+
+ absx = abs(x).astype(cast_dtype)
absx **= ord
ret = absx.sum(axis=axis, keepdims=keepdims)
ret **= (1.0 / ord)
@@ -110,7 +128,12 @@
row_axis -= 1
ret = abs(x).sum(axis=col_axis).min(axis=row_axis)
elif ord in [None, 'fro', 'f']:
- ret = cupy.sqrt((x ** 2).sum(axis=axis))
+ if issubclass(x.dtype.type, numpy.complexfloating):
+ s = abs(x)
+ s *= s
+ ret = cupy.sqrt(s.sum(axis=axis))
+ else:
+ ret = cupy.sqrt((x ** 2).sum(axis=axis))
else:
raise ValueError("Invalid norm order for matrices.")
if keepdims:
| {"golden_diff": "diff --git a/cupy/linalg/norms.py b/cupy/linalg/norms.py\n--- a/cupy/linalg/norms.py\n+++ b/cupy/linalg/norms.py\n@@ -39,7 +39,12 @@\n ndim = x.ndim\n if (ord is None or (ndim == 1 and ord == 2) or\n (ndim == 2 and ord in ('f', 'fro'))):\n- ret = cupy.sqrt(cupy.sum(x.ravel() ** 2))\n+ if issubclass(x.dtype.type, numpy.complexfloating):\n+ s = abs(x.ravel())\n+ s *= s\n+ ret = cupy.sqrt(s.sum())\n+ else:\n+ ret = cupy.sqrt((x.ravel() ** 2).sum())\n if keepdims:\n ret = ret.reshape((1,) * ndim)\n return ret\n@@ -70,14 +75,27 @@\n return abs(x).sum(axis=axis, keepdims=keepdims)\n elif ord is None or ord == 2:\n # special case for speedup\n- s = x ** 2\n+ if issubclass(x.dtype.type, numpy.complexfloating):\n+ s = abs(x)\n+ s *= s\n+ else:\n+ s = x ** 2\n return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))\n else:\n try:\n float(ord)\n except TypeError:\n raise ValueError(\"Invalid norm order for vectors.\")\n- absx = abs(x).astype('d')\n+\n+ # Mirror Numpy behavior of casting to double for non-complex\n+ # dtypes, and to float32 or float64 for complex dtypes and\n+ # no reduction over all axes.\n+ cast_dtype = 'd'\n+ if issubclass(x.dtype.type, numpy.complexfloating):\n+ if keepdims or tuple(sorted(axis)) != tuple(range(nd)):\n+ cast_dtype = x.dtype.char.lower() # 'D'->'d' and 'F'->'f'\n+\n+ absx = abs(x).astype(cast_dtype)\n absx **= ord\n ret = absx.sum(axis=axis, keepdims=keepdims)\n ret **= (1.0 / ord)\n@@ -110,7 +128,12 @@\n row_axis -= 1\n ret = abs(x).sum(axis=col_axis).min(axis=row_axis)\n elif ord in [None, 'fro', 'f']:\n- ret = cupy.sqrt((x ** 2).sum(axis=axis))\n+ if issubclass(x.dtype.type, numpy.complexfloating):\n+ s = abs(x)\n+ s *= s\n+ ret = cupy.sqrt(s.sum(axis=axis))\n+ else:\n+ ret = cupy.sqrt((x ** 2).sum(axis=axis))\n else:\n raise ValueError(\"Invalid norm order for matrices.\")\n if keepdims:\n", "issue": "cupy.linalg.norm returns complex scalar for complex input\n```python\r\n>>> a = cupy.array([1j, 2, 3])\r\n>>> a.dtype\r\ndtype('complex128')\r\n>>> cupy.linalg.norm(a).dtype\r\ndtype('complex128')\r\n```\r\nIt should be `float64` in this case.\n", "before_files": [{"content": "import numpy\nfrom numpy import linalg\n\nimport cupy\nfrom cupy import cuda\nfrom cupy.cuda import device\nfrom cupy.linalg import decomposition\nfrom cupy.linalg import util\n\n\nif cuda.cusolver_enabled:\n from cupy.cuda import cusolver\n\n\ndef norm(x, ord=None, axis=None, keepdims=False):\n \"\"\"Returns one of matrix norms specified by ``ord`` parameter.\n\n Complex valued matrices and vectors are not supported.\n See numpy.linalg.norm for more detail.\n\n Args:\n x (cupy.ndarray): Array to take norm. 
If ``axis`` is None,\n ``x`` must be 1-D or 2-D.\n ord (non-zero int, inf, -inf, 'fro'): Norm type.\n axis (int, 2-tuple of ints, None): 1-D or 2-D norm is cumputed over\n ``axis``.\n keepdims (bool): If this is set ``True``, the axes which are normed\n over are left.\n\n Returns:\n cupy.ndarray\n\n \"\"\"\n if not issubclass(x.dtype.type, numpy.inexact):\n x = x.astype(float)\n\n # Immediately handle some default, simple, fast, and common cases.\n if axis is None:\n ndim = x.ndim\n if (ord is None or (ndim == 1 and ord == 2) or\n (ndim == 2 and ord in ('f', 'fro'))):\n ret = cupy.sqrt(cupy.sum(x.ravel() ** 2))\n if keepdims:\n ret = ret.reshape((1,) * ndim)\n return ret\n\n # Normalize the `axis` argument to a tuple.\n nd = x.ndim\n if axis is None:\n axis = tuple(range(nd))\n elif not isinstance(axis, tuple):\n try:\n axis = int(axis)\n except Exception:\n raise TypeError(\n \"'axis' must be None, an integer or a tuple of integers\")\n axis = (axis,)\n\n if len(axis) == 1:\n if ord == numpy.Inf:\n return abs(x).max(axis=axis, keepdims=keepdims)\n elif ord == -numpy.Inf:\n return abs(x).min(axis=axis, keepdims=keepdims)\n elif ord == 0:\n # Zero norm\n # Convert to Python float in accordance with NumPy\n return (x != 0).sum(axis=axis, keepdims=keepdims, dtype='d')\n elif ord == 1:\n # special case for speedup\n return abs(x).sum(axis=axis, keepdims=keepdims)\n elif ord is None or ord == 2:\n # special case for speedup\n s = x ** 2\n return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))\n else:\n try:\n float(ord)\n except TypeError:\n raise ValueError(\"Invalid norm order for vectors.\")\n absx = abs(x).astype('d')\n absx **= ord\n ret = absx.sum(axis=axis, keepdims=keepdims)\n ret **= (1.0 / ord)\n return ret\n elif len(axis) == 2:\n row_axis, col_axis = axis\n if row_axis < 0:\n row_axis += nd\n if col_axis < 0:\n col_axis += nd\n if not (0 <= row_axis < nd and 0 <= col_axis < nd):\n raise ValueError('Invalid axis %r for an array with shape %r' %\n (axis, x.shape))\n if row_axis == col_axis:\n raise ValueError('Duplicate axes given.')\n if ord == 1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = abs(x).sum(axis=row_axis).max(axis=col_axis)\n elif ord == numpy.Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = abs(x).sum(axis=col_axis).max(axis=row_axis)\n elif ord == -1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = abs(x).sum(axis=row_axis).min(axis=col_axis)\n elif ord == -numpy.Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = abs(x).sum(axis=col_axis).min(axis=row_axis)\n elif ord in [None, 'fro', 'f']:\n ret = cupy.sqrt((x ** 2).sum(axis=axis))\n else:\n raise ValueError(\"Invalid norm order for matrices.\")\n if keepdims:\n ret_shape = list(x.shape)\n ret_shape[axis[0]] = 1\n ret_shape[axis[1]] = 1\n ret = ret.reshape(ret_shape)\n return ret\n else:\n raise ValueError(\"Improper number of dimensions to norm.\")\n\n\n# TODO(okuta): Implement cond\n\n\ndef det(a):\n \"\"\"Retruns the deteminant of an array.\n\n Args:\n a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.\n\n Returns:\n cupy.ndarray: Determinant of ``a``. Its shape is ``a.shape[:-2]``.\n\n .. seealso:: :func:`numpy.linalg.det`\n \"\"\"\n sign, logdet = slogdet(a)\n return sign * cupy.exp(logdet)\n\n\ndef matrix_rank(M, tol=None):\n \"\"\"Return matrix rank of array using SVD method\n\n Args:\n M (cupy.ndarray): Input array. 
Its `ndim` must be less than or equal to\n 2.\n tol (None or float): Threshold of singular value of `M`.\n When `tol` is `None`, and `eps` is the epsilon value for datatype\n of `M`, then `tol` is set to `S.max() * max(M.shape) * eps`,\n where `S` is the singular value of `M`.\n It obeys :func:`numpy.linalg.matrix_rank`.\n\n Returns:\n cupy.ndarray: Rank of `M`.\n\n .. seealso:: :func:`numpy.linalg.matrix_rank`\n \"\"\"\n if M.ndim < 2:\n return (M != 0).any().astype('l')\n S = decomposition.svd(M, compute_uv=False)\n if tol is None:\n tol = (S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) *\n numpy.finfo(S.dtype).eps)\n return (S > tol).sum(axis=-1)\n\n\ndef slogdet(a):\n \"\"\"Returns sign and logarithm of the determinat of an array.\n\n It calculates the natural logarithm of the deteminant of a given value.\n\n Args:\n a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.\n\n Returns:\n tuple of :class:`~cupy.ndarray`:\n It returns a tuple ``(sign, logdet)``. ``sign`` represents each\n sign of the deteminant as a real number ``0``, ``1`` or ``-1``.\n 'logdet' represents the natural logarithm of the absolute of the\n deteminant.\n If the deteninant is zero, ``sign`` will be ``0`` and ``logdet``\n will be ``-inf``.\n The shapes of both ``sign`` and ``logdet`` are equal to\n ``a.shape[:-2]``.\n\n .. seealso:: :func:`numpy.linalg.slogdet`\n \"\"\"\n if not cuda.cusolver_enabled:\n raise RuntimeError('Current cupy only supports cusolver in CUDA 8.0')\n\n if a.ndim < 2:\n msg = ('%d-dimensional array given. '\n 'Array must be at least two-dimensional' % a.ndim)\n raise linalg.LinAlgError(msg)\n\n dtype = numpy.find_common_type((a.dtype.char, 'f'), ())\n shape = a.shape[:-2]\n sign = cupy.empty(shape, dtype)\n logdet = cupy.empty(shape, dtype)\n\n a = a.astype(dtype)\n for index in numpy.ndindex(*shape):\n s, l = _slogdet_one(a[index])\n sign[index] = s\n logdet[index] = l\n return sign, logdet\n\n\ndef _slogdet_one(a):\n util._assert_rank2(a)\n util._assert_nd_squareness(a)\n dtype = a.dtype\n\n handle = device.get_cusolver_handle()\n m = len(a)\n ipiv = cupy.empty(m, 'i')\n info = cupy.empty((), 'i')\n\n # Need to make a copy because getrf works inplace\n a_copy = a.copy(order='F')\n\n if dtype == 'f':\n getrf_bufferSize = cusolver.sgetrf_bufferSize\n getrf = cusolver.sgetrf\n else:\n getrf_bufferSize = cusolver.dgetrf_bufferSize\n getrf = cusolver.dgetrf\n\n buffersize = getrf_bufferSize(handle, m, m, a_copy.data.ptr, m)\n workspace = cupy.empty(buffersize, dtype=dtype)\n getrf(handle, m, m, a_copy.data.ptr, m, workspace.data.ptr,\n ipiv.data.ptr, info.data.ptr)\n\n if info[()] == 0:\n diag = cupy.diag(a_copy)\n # ipiv is 1-origin\n non_zero = (cupy.count_nonzero(ipiv != cupy.arange(1, m + 1)) +\n cupy.count_nonzero(diag < 0))\n # Note: sign == -1 ** (non_zero % 2)\n sign = (non_zero % 2) * -2 + 1\n logdet = cupy.log(abs(diag)).sum()\n else:\n sign = cupy.array(0.0, dtype=dtype)\n logdet = cupy.array(float('-inf'), dtype)\n\n return sign, logdet\n\n\ndef trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):\n \"\"\"Returns the sum along the diagonals of an array.\n\n It computes the sum along the diagonals at ``axis1`` and ``axis2``.\n\n Args:\n a (cupy.ndarray): Array to take trace.\n offset (int): Index of diagonals. 
Zero indicates the main diagonal, a\n positive value an upper diagonal, and a negative value a lower\n diagonal.\n axis1 (int): The first axis along which the trace is taken.\n axis2 (int): The second axis along which the trace is taken.\n dtype: Data type specifier of the output.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: The trace of ``a`` along axes ``(axis1, axis2)``.\n\n .. seealso:: :func:`numpy.trace`\n\n \"\"\"\n # TODO(okuta): check type\n return a.trace(offset, axis1, axis2, dtype, out)\n", "path": "cupy/linalg/norms.py"}], "after_files": [{"content": "import numpy\nfrom numpy import linalg\n\nimport cupy\nfrom cupy import cuda\nfrom cupy.cuda import device\nfrom cupy.linalg import decomposition\nfrom cupy.linalg import util\n\n\nif cuda.cusolver_enabled:\n from cupy.cuda import cusolver\n\n\ndef norm(x, ord=None, axis=None, keepdims=False):\n \"\"\"Returns one of matrix norms specified by ``ord`` parameter.\n\n Complex valued matrices and vectors are not supported.\n See numpy.linalg.norm for more detail.\n\n Args:\n x (cupy.ndarray): Array to take norm. If ``axis`` is None,\n ``x`` must be 1-D or 2-D.\n ord (non-zero int, inf, -inf, 'fro'): Norm type.\n axis (int, 2-tuple of ints, None): 1-D or 2-D norm is cumputed over\n ``axis``.\n keepdims (bool): If this is set ``True``, the axes which are normed\n over are left.\n\n Returns:\n cupy.ndarray\n\n \"\"\"\n if not issubclass(x.dtype.type, numpy.inexact):\n x = x.astype(float)\n\n # Immediately handle some default, simple, fast, and common cases.\n if axis is None:\n ndim = x.ndim\n if (ord is None or (ndim == 1 and ord == 2) or\n (ndim == 2 and ord in ('f', 'fro'))):\n if issubclass(x.dtype.type, numpy.complexfloating):\n s = abs(x.ravel())\n s *= s\n ret = cupy.sqrt(s.sum())\n else:\n ret = cupy.sqrt((x.ravel() ** 2).sum())\n if keepdims:\n ret = ret.reshape((1,) * ndim)\n return ret\n\n # Normalize the `axis` argument to a tuple.\n nd = x.ndim\n if axis is None:\n axis = tuple(range(nd))\n elif not isinstance(axis, tuple):\n try:\n axis = int(axis)\n except Exception:\n raise TypeError(\n \"'axis' must be None, an integer or a tuple of integers\")\n axis = (axis,)\n\n if len(axis) == 1:\n if ord == numpy.Inf:\n return abs(x).max(axis=axis, keepdims=keepdims)\n elif ord == -numpy.Inf:\n return abs(x).min(axis=axis, keepdims=keepdims)\n elif ord == 0:\n # Zero norm\n # Convert to Python float in accordance with NumPy\n return (x != 0).sum(axis=axis, keepdims=keepdims, dtype='d')\n elif ord == 1:\n # special case for speedup\n return abs(x).sum(axis=axis, keepdims=keepdims)\n elif ord is None or ord == 2:\n # special case for speedup\n if issubclass(x.dtype.type, numpy.complexfloating):\n s = abs(x)\n s *= s\n else:\n s = x ** 2\n return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))\n else:\n try:\n float(ord)\n except TypeError:\n raise ValueError(\"Invalid norm order for vectors.\")\n\n # Mirror Numpy behavior of casting to double for non-complex\n # dtypes, and to float32 or float64 for complex dtypes and\n # no reduction over all axes.\n cast_dtype = 'd'\n if issubclass(x.dtype.type, numpy.complexfloating):\n if keepdims or tuple(sorted(axis)) != tuple(range(nd)):\n cast_dtype = x.dtype.char.lower() # 'D'->'d' and 'F'->'f'\n\n absx = abs(x).astype(cast_dtype)\n absx **= ord\n ret = absx.sum(axis=axis, keepdims=keepdims)\n ret **= (1.0 / ord)\n return ret\n elif len(axis) == 2:\n row_axis, col_axis = axis\n if row_axis < 0:\n row_axis += nd\n if col_axis < 0:\n col_axis += nd\n if not (0 <= row_axis < 
nd and 0 <= col_axis < nd):\n raise ValueError('Invalid axis %r for an array with shape %r' %\n (axis, x.shape))\n if row_axis == col_axis:\n raise ValueError('Duplicate axes given.')\n if ord == 1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = abs(x).sum(axis=row_axis).max(axis=col_axis)\n elif ord == numpy.Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = abs(x).sum(axis=col_axis).max(axis=row_axis)\n elif ord == -1:\n if col_axis > row_axis:\n col_axis -= 1\n ret = abs(x).sum(axis=row_axis).min(axis=col_axis)\n elif ord == -numpy.Inf:\n if row_axis > col_axis:\n row_axis -= 1\n ret = abs(x).sum(axis=col_axis).min(axis=row_axis)\n elif ord in [None, 'fro', 'f']:\n if issubclass(x.dtype.type, numpy.complexfloating):\n s = abs(x)\n s *= s\n ret = cupy.sqrt(s.sum(axis=axis))\n else:\n ret = cupy.sqrt((x ** 2).sum(axis=axis))\n else:\n raise ValueError(\"Invalid norm order for matrices.\")\n if keepdims:\n ret_shape = list(x.shape)\n ret_shape[axis[0]] = 1\n ret_shape[axis[1]] = 1\n ret = ret.reshape(ret_shape)\n return ret\n else:\n raise ValueError(\"Improper number of dimensions to norm.\")\n\n\n# TODO(okuta): Implement cond\n\n\ndef det(a):\n \"\"\"Retruns the deteminant of an array.\n\n Args:\n a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.\n\n Returns:\n cupy.ndarray: Determinant of ``a``. Its shape is ``a.shape[:-2]``.\n\n .. seealso:: :func:`numpy.linalg.det`\n \"\"\"\n sign, logdet = slogdet(a)\n return sign * cupy.exp(logdet)\n\n\ndef matrix_rank(M, tol=None):\n \"\"\"Return matrix rank of array using SVD method\n\n Args:\n M (cupy.ndarray): Input array. Its `ndim` must be less than or equal to\n 2.\n tol (None or float): Threshold of singular value of `M`.\n When `tol` is `None`, and `eps` is the epsilon value for datatype\n of `M`, then `tol` is set to `S.max() * max(M.shape) * eps`,\n where `S` is the singular value of `M`.\n It obeys :func:`numpy.linalg.matrix_rank`.\n\n Returns:\n cupy.ndarray: Rank of `M`.\n\n .. seealso:: :func:`numpy.linalg.matrix_rank`\n \"\"\"\n if M.ndim < 2:\n return (M != 0).any().astype('l')\n S = decomposition.svd(M, compute_uv=False)\n if tol is None:\n tol = (S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) *\n numpy.finfo(S.dtype).eps)\n return (S > tol).sum(axis=-1)\n\n\ndef slogdet(a):\n \"\"\"Returns sign and logarithm of the determinat of an array.\n\n It calculates the natural logarithm of the deteminant of a given value.\n\n Args:\n a (cupy.ndarray): The input matrix with dimension ``(..., N, N)``.\n\n Returns:\n tuple of :class:`~cupy.ndarray`:\n It returns a tuple ``(sign, logdet)``. ``sign`` represents each\n sign of the deteminant as a real number ``0``, ``1`` or ``-1``.\n 'logdet' represents the natural logarithm of the absolute of the\n deteminant.\n If the deteninant is zero, ``sign`` will be ``0`` and ``logdet``\n will be ``-inf``.\n The shapes of both ``sign`` and ``logdet`` are equal to\n ``a.shape[:-2]``.\n\n .. seealso:: :func:`numpy.linalg.slogdet`\n \"\"\"\n if not cuda.cusolver_enabled:\n raise RuntimeError('Current cupy only supports cusolver in CUDA 8.0')\n\n if a.ndim < 2:\n msg = ('%d-dimensional array given. 
'\n 'Array must be at least two-dimensional' % a.ndim)\n raise linalg.LinAlgError(msg)\n\n dtype = numpy.find_common_type((a.dtype.char, 'f'), ())\n shape = a.shape[:-2]\n sign = cupy.empty(shape, dtype)\n logdet = cupy.empty(shape, dtype)\n\n a = a.astype(dtype)\n for index in numpy.ndindex(*shape):\n s, l = _slogdet_one(a[index])\n sign[index] = s\n logdet[index] = l\n return sign, logdet\n\n\ndef _slogdet_one(a):\n util._assert_rank2(a)\n util._assert_nd_squareness(a)\n dtype = a.dtype\n\n handle = device.get_cusolver_handle()\n m = len(a)\n ipiv = cupy.empty(m, 'i')\n info = cupy.empty((), 'i')\n\n # Need to make a copy because getrf works inplace\n a_copy = a.copy(order='F')\n\n if dtype == 'f':\n getrf_bufferSize = cusolver.sgetrf_bufferSize\n getrf = cusolver.sgetrf\n else:\n getrf_bufferSize = cusolver.dgetrf_bufferSize\n getrf = cusolver.dgetrf\n\n buffersize = getrf_bufferSize(handle, m, m, a_copy.data.ptr, m)\n workspace = cupy.empty(buffersize, dtype=dtype)\n getrf(handle, m, m, a_copy.data.ptr, m, workspace.data.ptr,\n ipiv.data.ptr, info.data.ptr)\n\n if info[()] == 0:\n diag = cupy.diag(a_copy)\n # ipiv is 1-origin\n non_zero = (cupy.count_nonzero(ipiv != cupy.arange(1, m + 1)) +\n cupy.count_nonzero(diag < 0))\n # Note: sign == -1 ** (non_zero % 2)\n sign = (non_zero % 2) * -2 + 1\n logdet = cupy.log(abs(diag)).sum()\n else:\n sign = cupy.array(0.0, dtype=dtype)\n logdet = cupy.array(float('-inf'), dtype)\n\n return sign, logdet\n\n\ndef trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):\n \"\"\"Returns the sum along the diagonals of an array.\n\n It computes the sum along the diagonals at ``axis1`` and ``axis2``.\n\n Args:\n a (cupy.ndarray): Array to take trace.\n offset (int): Index of diagonals. Zero indicates the main diagonal, a\n positive value an upper diagonal, and a negative value a lower\n diagonal.\n axis1 (int): The first axis along which the trace is taken.\n axis2 (int): The second axis along which the trace is taken.\n dtype: Data type specifier of the output.\n out (cupy.ndarray): Output array.\n\n Returns:\n cupy.ndarray: The trace of ``a`` along axes ``(axis1, axis2)``.\n\n .. seealso:: :func:`numpy.trace`\n\n \"\"\"\n # TODO(okuta): check type\n return a.trace(offset, axis1, axis2, dtype, out)\n", "path": "cupy/linalg/norms.py"}]} | 3,460 | 665 |
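For readers skimming the cupy record above, the essence of the accepted patch is that the reduction is taken over the squared magnitudes `abs(x)**2` rather than `x**2`, so a complex input can no longer produce a complex norm. The sketch below restates that idea with NumPy only so it runs without a GPU; the function name and structure are illustrative and are not cupy's actual implementation.

```python
import numpy as np

def l2_norm(x):
    """Euclidean norm that stays real-valued for complex inputs.

    For complex dtypes the squared magnitudes are summed (|x|**2 is real),
    matching the behaviour the cupy patch restores.
    """
    x = np.asarray(x)
    if np.issubdtype(x.dtype, np.complexfloating):
        s = np.abs(x.ravel())  # magnitudes: float64 for complex128 input
        s *= s                 # |x|**2, still real
    else:
        s = x.ravel() ** 2
    return np.sqrt(s.sum())

a = np.array([1j, 2, 3])          # complex128
print(l2_norm(a))                 # 3.7416573867739413
print(l2_norm(a).dtype)           # float64, not complex128
```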
gh_patches_debug_28952 | rasdani/github-patches | git_diff | nautobot__nautobot-3943 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Protect against Git-repo jobs clobbering sys.modules
### As ...
Patti - Platform Admin
### I want ...
To add Jobs via Git repositories without worrying about them impacting the overall stability of the platform.
After #3840 , there is a risk that a misnamed (or maliciously-named) Git repository could potentially clobber existing Python code. For example, creating a repository and assigning it the slug `nautobot` causes all sorts of havoc as it results in Nautobot unloading itself and then attempting to reimport all of Nautobot's code from the Git repository by that name.
### So that ...
The application is resilient to user error as well as mischievous/malicious actions by authenticated users.
### I know this is done when...
- Creating a GitRepository whose `slug` matches any currently loaded Python module in the Nautobot environment is rejected with an appropriate error message.
### Optional - Feature groups this request pertains to.
- [X] Automation
- [ ] Circuits
- [ ] DCIM
- [ ] IPAM
- [ ] Misc (including Data Sources)
- [ ] Organization
- [ ] Plugins (and other Extensibility)
- [X] Security (Secrets, etc)
- [ ] Image Management
- [ ] UI/UX
- [ ] Documentation
- [ ] Other (not directly a platform feature)
### Database Changes
None
### External Dependencies
None
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nautobot/extras/models/datasources.py`
Content:
```
1 """Models for representing external data sources."""
2 import os
3
4 from django.conf import settings
5 from django.core.exceptions import ValidationError
6 from django.core.serializers.json import DjangoJSONEncoder
7 from django.core.validators import URLValidator
8 from django.db import models
9
10 from nautobot.core.models.fields import AutoSlugField, slugify_dashes_to_underscores
11 from nautobot.core.models.generics import PrimaryModel
12 from nautobot.extras.utils import extras_features, check_if_key_is_graphql_safe
13
14
15 @extras_features(
16 "config_context_owners",
17 "export_template_owners",
18 "job_results",
19 "webhooks",
20 )
21 class GitRepository(PrimaryModel):
22 """Representation of a Git repository used as an external data source."""
23
24 name = models.CharField(
25 max_length=100,
26 unique=True,
27 )
28 slug = AutoSlugField(
29 populate_from="name",
30 help_text="Internal field name. Please use underscores rather than dashes in this key.",
31 slugify_function=slugify_dashes_to_underscores,
32 )
33
34 remote_url = models.URLField(
35 max_length=255,
36 # For the moment we don't support ssh:// and git:// URLs
37 help_text="Only HTTP and HTTPS URLs are presently supported",
38 validators=[URLValidator(schemes=["http", "https"])],
39 )
40 branch = models.CharField(
41 max_length=64,
42 default="main",
43 )
44
45 current_head = models.CharField(
46 help_text="Commit hash of the most recent fetch from the selected branch. Used for syncing between workers.",
47 max_length=48,
48 default="",
49 blank=True,
50 )
51
52 secrets_group = models.ForeignKey(
53 to="extras.SecretsGroup",
54 on_delete=models.SET_NULL,
55 default=None,
56 blank=True,
57 null=True,
58 related_name="git_repositories",
59 )
60
61 # Data content types that this repo is a source of. Valid options are dynamically generated based on
62 # the data types registered in registry['datasource_contents'].
63 provided_contents = models.JSONField(encoder=DjangoJSONEncoder, default=list, blank=True)
64
65 clone_fields = ["remote_url", "secrets_group", "provided_contents"]
66
67 class Meta:
68 ordering = ["name"]
69 verbose_name = "Git repository"
70 verbose_name_plural = "Git repositories"
71
72 def __init__(self, *args, **kwargs):
73 super().__init__(*args, **kwargs)
74
75 # Store the initial repo slug so we can check for changes on save().
76 self.__initial_slug = self.slug
77
78 def __str__(self):
79 return self.name
80
81 def clean(self):
82 super().clean()
83
84 if self.slug != "":
85 check_if_key_is_graphql_safe(self.__class__.__name__, self.slug, "slug")
86
87 if self.present_in_database and self.slug != self.__initial_slug:
88 raise ValidationError(
89 f"Slug cannot be changed once set. Current slug is {self.__initial_slug}, "
90 f"requested slug is {self.slug}"
91 )
92
93 def get_latest_sync(self):
94 """
95 Return a `JobResult` for the latest sync operation.
96
97 Returns:
98 JobResult
99 """
100 from nautobot.extras.models import JobResult
101
102 # This will match all "GitRepository" jobs (pull/refresh, dry-run, etc.)
103 prefix = "nautobot.core.jobs.GitRepository"
104 return JobResult.objects.filter(task_name__startswith=prefix, task_kwargs__repository=self.pk).latest()
105
106 def to_csv(self):
107 return (
108 self.name,
109 self.slug,
110 self.remote_url,
111 self.branch,
112 self.secrets_group.name if self.secrets_group else None,
113 self.provided_contents,
114 )
115
116 @property
117 def filesystem_path(self):
118 return os.path.join(settings.GIT_ROOT, self.slug)
119
120 def sync(self, user, dry_run=False):
121 """
122 Enqueue a Job to pull the Git repository from the remote and return the sync result.
123
124 Args:
125 user (User): The User that will perform the sync.
126 dry_run (bool): If set, dry-run the Git sync.
127
128 Returns:
129 JobResult
130 """
131 from nautobot.extras.datasources import (
132 enqueue_pull_git_repository_and_refresh_data,
133 enqueue_git_repository_diff_origin_and_local,
134 )
135
136 if dry_run:
137 return enqueue_git_repository_diff_origin_and_local(self, user)
138 return enqueue_pull_git_repository_and_refresh_data(self, user)
139
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nautobot/extras/models/datasources.py b/nautobot/extras/models/datasources.py
--- a/nautobot/extras/models/datasources.py
+++ b/nautobot/extras/models/datasources.py
@@ -1,4 +1,5 @@
"""Models for representing external data sources."""
+from importlib.util import find_spec
import os
from django.conf import settings
@@ -81,13 +82,24 @@
def clean(self):
super().clean()
- if self.slug != "":
- check_if_key_is_graphql_safe(self.__class__.__name__, self.slug, "slug")
+ # Autogenerate slug now, rather than in pre_save(), if not set already, as we need to check it below.
+ if self.slug == "":
+ self._meta.get_field("slug").create_slug(self, add=(not self.present_in_database))
+
+ if self.present_in_database and self.slug != self.__initial_slug:
+ raise ValidationError(
+ f"Slug cannot be changed once set. Current slug is {self.__initial_slug}, "
+ f"requested slug is {self.slug}"
+ )
- if self.present_in_database and self.slug != self.__initial_slug:
+ if not self.present_in_database:
+ check_if_key_is_graphql_safe(self.__class__.__name__, self.slug, "slug")
+ # Check on create whether the proposed slug conflicts with a module name already in the Python environment.
+ # Because we add GIT_ROOT to the end of sys.path, trying to import this repository will instead
+ # import the earlier-found Python module in its place, which would be undesirable.
+ if find_spec(self.slug) is not None:
raise ValidationError(
- f"Slug cannot be changed once set. Current slug is {self.__initial_slug}, "
- f"requested slug is {self.slug}"
+ f'Please choose a different slug, as "{self.slug}" is an installed Python package or module.'
)
def get_latest_sync(self):
| {"golden_diff": "diff --git a/nautobot/extras/models/datasources.py b/nautobot/extras/models/datasources.py\n--- a/nautobot/extras/models/datasources.py\n+++ b/nautobot/extras/models/datasources.py\n@@ -1,4 +1,5 @@\n \"\"\"Models for representing external data sources.\"\"\"\n+from importlib.util import find_spec\n import os\n \n from django.conf import settings\n@@ -81,13 +82,24 @@\n def clean(self):\n super().clean()\n \n- if self.slug != \"\":\n- check_if_key_is_graphql_safe(self.__class__.__name__, self.slug, \"slug\")\n+ # Autogenerate slug now, rather than in pre_save(), if not set already, as we need to check it below.\n+ if self.slug == \"\":\n+ self._meta.get_field(\"slug\").create_slug(self, add=(not self.present_in_database))\n+\n+ if self.present_in_database and self.slug != self.__initial_slug:\n+ raise ValidationError(\n+ f\"Slug cannot be changed once set. Current slug is {self.__initial_slug}, \"\n+ f\"requested slug is {self.slug}\"\n+ )\n \n- if self.present_in_database and self.slug != self.__initial_slug:\n+ if not self.present_in_database:\n+ check_if_key_is_graphql_safe(self.__class__.__name__, self.slug, \"slug\")\n+ # Check on create whether the proposed slug conflicts with a module name already in the Python environment.\n+ # Because we add GIT_ROOT to the end of sys.path, trying to import this repository will instead\n+ # import the earlier-found Python module in its place, which would be undesirable.\n+ if find_spec(self.slug) is not None:\n raise ValidationError(\n- f\"Slug cannot be changed once set. Current slug is {self.__initial_slug}, \"\n- f\"requested slug is {self.slug}\"\n+ f'Please choose a different slug, as \"{self.slug}\" is an installed Python package or module.'\n )\n \n def get_latest_sync(self):\n", "issue": "Protect against Git-repo jobs clobbering sys.modules\n### As ...\n\nPatti - Platform Admin\n\n### I want ...\n\nTo add Jobs via Git repositories without worrying about them impacting the overall stability of the platform.\r\n\r\nAfter #3840 , there is a risk that a misnamed (or maliciously-named) Git repository could potentially clobber existing Python code. 
For example, creating a repository and assigning it the slug `nautobot` causes all sorts of havoc as it results in Nautobot unloading itself and then attempting to reimport all of Nautobot's code from the Git repository by that name.\n\n### So that ...\n\nThe application is resilient to user error as well as mischievous/malicious actions by authenticated users.\n\n### I know this is done when...\n\n- Creating a GitRepository whose `slug` matches any currently loaded Python module in the Nautobot environment is rejected with an appropriate error message.\n\n### Optional - Feature groups this request pertains to.\n\n- [X] Automation\n- [ ] Circuits\n- [ ] DCIM\n- [ ] IPAM\n- [ ] Misc (including Data Sources)\n- [ ] Organization\n- [ ] Plugins (and other Extensibility)\n- [X] Security (Secrets, etc)\n- [ ] Image Management\n- [ ] UI/UX\n- [ ] Documentation\n- [ ] Other (not directly a platform feature)\n\n### Database Changes\n\nNone\n\n### External Dependencies\n\nNone\n", "before_files": [{"content": "\"\"\"Models for representing external data sources.\"\"\"\nimport os\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.core.validators import URLValidator\nfrom django.db import models\n\nfrom nautobot.core.models.fields import AutoSlugField, slugify_dashes_to_underscores\nfrom nautobot.core.models.generics import PrimaryModel\nfrom nautobot.extras.utils import extras_features, check_if_key_is_graphql_safe\n\n\n@extras_features(\n \"config_context_owners\",\n \"export_template_owners\",\n \"job_results\",\n \"webhooks\",\n)\nclass GitRepository(PrimaryModel):\n \"\"\"Representation of a Git repository used as an external data source.\"\"\"\n\n name = models.CharField(\n max_length=100,\n unique=True,\n )\n slug = AutoSlugField(\n populate_from=\"name\",\n help_text=\"Internal field name. Please use underscores rather than dashes in this key.\",\n slugify_function=slugify_dashes_to_underscores,\n )\n\n remote_url = models.URLField(\n max_length=255,\n # For the moment we don't support ssh:// and git:// URLs\n help_text=\"Only HTTP and HTTPS URLs are presently supported\",\n validators=[URLValidator(schemes=[\"http\", \"https\"])],\n )\n branch = models.CharField(\n max_length=64,\n default=\"main\",\n )\n\n current_head = models.CharField(\n help_text=\"Commit hash of the most recent fetch from the selected branch. Used for syncing between workers.\",\n max_length=48,\n default=\"\",\n blank=True,\n )\n\n secrets_group = models.ForeignKey(\n to=\"extras.SecretsGroup\",\n on_delete=models.SET_NULL,\n default=None,\n blank=True,\n null=True,\n related_name=\"git_repositories\",\n )\n\n # Data content types that this repo is a source of. 
Valid options are dynamically generated based on\n # the data types registered in registry['datasource_contents'].\n provided_contents = models.JSONField(encoder=DjangoJSONEncoder, default=list, blank=True)\n\n clone_fields = [\"remote_url\", \"secrets_group\", \"provided_contents\"]\n\n class Meta:\n ordering = [\"name\"]\n verbose_name = \"Git repository\"\n verbose_name_plural = \"Git repositories\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Store the initial repo slug so we can check for changes on save().\n self.__initial_slug = self.slug\n\n def __str__(self):\n return self.name\n\n def clean(self):\n super().clean()\n\n if self.slug != \"\":\n check_if_key_is_graphql_safe(self.__class__.__name__, self.slug, \"slug\")\n\n if self.present_in_database and self.slug != self.__initial_slug:\n raise ValidationError(\n f\"Slug cannot be changed once set. Current slug is {self.__initial_slug}, \"\n f\"requested slug is {self.slug}\"\n )\n\n def get_latest_sync(self):\n \"\"\"\n Return a `JobResult` for the latest sync operation.\n\n Returns:\n JobResult\n \"\"\"\n from nautobot.extras.models import JobResult\n\n # This will match all \"GitRepository\" jobs (pull/refresh, dry-run, etc.)\n prefix = \"nautobot.core.jobs.GitRepository\"\n return JobResult.objects.filter(task_name__startswith=prefix, task_kwargs__repository=self.pk).latest()\n\n def to_csv(self):\n return (\n self.name,\n self.slug,\n self.remote_url,\n self.branch,\n self.secrets_group.name if self.secrets_group else None,\n self.provided_contents,\n )\n\n @property\n def filesystem_path(self):\n return os.path.join(settings.GIT_ROOT, self.slug)\n\n def sync(self, user, dry_run=False):\n \"\"\"\n Enqueue a Job to pull the Git repository from the remote and return the sync result.\n\n Args:\n user (User): The User that will perform the sync.\n dry_run (bool): If set, dry-run the Git sync.\n\n Returns:\n JobResult\n \"\"\"\n from nautobot.extras.datasources import (\n enqueue_pull_git_repository_and_refresh_data,\n enqueue_git_repository_diff_origin_and_local,\n )\n\n if dry_run:\n return enqueue_git_repository_diff_origin_and_local(self, user)\n return enqueue_pull_git_repository_and_refresh_data(self, user)\n", "path": "nautobot/extras/models/datasources.py"}], "after_files": [{"content": "\"\"\"Models for representing external data sources.\"\"\"\nfrom importlib.util import find_spec\nimport os\n\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nfrom django.core.serializers.json import DjangoJSONEncoder\nfrom django.core.validators import URLValidator\nfrom django.db import models\n\nfrom nautobot.core.models.fields import AutoSlugField, slugify_dashes_to_underscores\nfrom nautobot.core.models.generics import PrimaryModel\nfrom nautobot.extras.utils import extras_features, check_if_key_is_graphql_safe\n\n\n@extras_features(\n \"config_context_owners\",\n \"export_template_owners\",\n \"job_results\",\n \"webhooks\",\n)\nclass GitRepository(PrimaryModel):\n \"\"\"Representation of a Git repository used as an external data source.\"\"\"\n\n name = models.CharField(\n max_length=100,\n unique=True,\n )\n slug = AutoSlugField(\n populate_from=\"name\",\n help_text=\"Internal field name. 
Please use underscores rather than dashes in this key.\",\n slugify_function=slugify_dashes_to_underscores,\n )\n\n remote_url = models.URLField(\n max_length=255,\n # For the moment we don't support ssh:// and git:// URLs\n help_text=\"Only HTTP and HTTPS URLs are presently supported\",\n validators=[URLValidator(schemes=[\"http\", \"https\"])],\n )\n branch = models.CharField(\n max_length=64,\n default=\"main\",\n )\n\n current_head = models.CharField(\n help_text=\"Commit hash of the most recent fetch from the selected branch. Used for syncing between workers.\",\n max_length=48,\n default=\"\",\n blank=True,\n )\n\n secrets_group = models.ForeignKey(\n to=\"extras.SecretsGroup\",\n on_delete=models.SET_NULL,\n default=None,\n blank=True,\n null=True,\n related_name=\"git_repositories\",\n )\n\n # Data content types that this repo is a source of. Valid options are dynamically generated based on\n # the data types registered in registry['datasource_contents'].\n provided_contents = models.JSONField(encoder=DjangoJSONEncoder, default=list, blank=True)\n\n clone_fields = [\"remote_url\", \"secrets_group\", \"provided_contents\"]\n\n class Meta:\n ordering = [\"name\"]\n verbose_name = \"Git repository\"\n verbose_name_plural = \"Git repositories\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Store the initial repo slug so we can check for changes on save().\n self.__initial_slug = self.slug\n\n def __str__(self):\n return self.name\n\n def clean(self):\n super().clean()\n\n # Autogenerate slug now, rather than in pre_save(), if not set already, as we need to check it below.\n if self.slug == \"\":\n self._meta.get_field(\"slug\").create_slug(self, add=(not self.present_in_database))\n\n if self.present_in_database and self.slug != self.__initial_slug:\n raise ValidationError(\n f\"Slug cannot be changed once set. 
Current slug is {self.__initial_slug}, \"\n f\"requested slug is {self.slug}\"\n )\n\n if not self.present_in_database:\n check_if_key_is_graphql_safe(self.__class__.__name__, self.slug, \"slug\")\n # Check on create whether the proposed slug conflicts with a module name already in the Python environment.\n # Because we add GIT_ROOT to the end of sys.path, trying to import this repository will instead\n # import the earlier-found Python module in its place, which would be undesirable.\n if find_spec(self.slug) is not None:\n raise ValidationError(\n f'Please choose a different slug, as \"{self.slug}\" is an installed Python package or module.'\n )\n\n def get_latest_sync(self):\n \"\"\"\n Return a `JobResult` for the latest sync operation.\n\n Returns:\n JobResult\n \"\"\"\n from nautobot.extras.models import JobResult\n\n # This will match all \"GitRepository\" jobs (pull/refresh, dry-run, etc.)\n prefix = \"nautobot.core.jobs.GitRepository\"\n return JobResult.objects.filter(task_name__startswith=prefix, task_kwargs__repository=self.pk).latest()\n\n def to_csv(self):\n return (\n self.name,\n self.slug,\n self.remote_url,\n self.branch,\n self.secrets_group.name if self.secrets_group else None,\n self.provided_contents,\n )\n\n @property\n def filesystem_path(self):\n return os.path.join(settings.GIT_ROOT, self.slug)\n\n def sync(self, user, dry_run=False):\n \"\"\"\n Enqueue a Job to pull the Git repository from the remote and return the sync result.\n\n Args:\n user (User): The User that will perform the sync.\n dry_run (bool): If set, dry-run the Git sync.\n\n Returns:\n JobResult\n \"\"\"\n from nautobot.extras.datasources import (\n enqueue_pull_git_repository_and_refresh_data,\n enqueue_git_repository_diff_origin_and_local,\n )\n\n if dry_run:\n return enqueue_git_repository_diff_origin_and_local(self, user)\n return enqueue_pull_git_repository_and_refresh_data(self, user)\n", "path": "nautobot/extras/models/datasources.py"}]} | 1,838 | 445 |
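The core of the nautobot patch above is a single guard: before accepting a repository slug, ask the import machinery whether that name already resolves to something importable, because `GIT_ROOT` ends up on `sys.path`. A minimal standalone sketch of that check follows; the function name and the use of `ValueError` (instead of Django's `ValidationError`) are illustrative choices, and the example assumes no module named `my_network_jobs` is installed.

```python
from importlib.util import find_spec

def validate_repo_slug(slug):
    """Reject slugs that would shadow an importable Python module or package."""
    if find_spec(slug) is not None:
        raise ValueError(
            f'Please choose a different slug, as "{slug}" is an installed '
            "Python package or module."
        )
    return slug

print(validate_repo_slug("my_network_jobs"))  # accepted, assuming nothing importable by that name

try:
    validate_repo_slug("json")                # stdlib module, so it must be rejected
except ValueError as exc:
    print(exc)
```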
gh_patches_debug_160 | rasdani/github-patches | git_diff | cookiecutter__cookiecutter-609 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Drop distutils support
I feel it's about time to drop **distutils** support as we rely on [extras_require of setuptools](https://pythonhosted.org/setuptools/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies) to handle **yaml** dependencies..
What's your opinion on this matter?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2
3 import os
4 import sys
5
6 try:
7 from setuptools import setup
8 except ImportError:
9 from distutils.core import setup
10
11 version = "1.3.0"
12
13 if sys.argv[-1] == 'publish':
14 os.system('python setup.py sdist upload')
15 os.system('python setup.py bdist_wheel upload')
16 sys.exit()
17
18 if sys.argv[-1] == 'tag':
19 os.system("git tag -a %s -m 'version %s'" % (version, version))
20 os.system("git push --tags")
21 sys.exit()
22
23 with open('README.rst') as readme_file:
24 readme = readme_file.read()
25
26 with open('HISTORY.rst') as history_file:
27 history = history_file.read().replace('.. :changelog:', '')
28
29 requirements = [
30 'future>=0.15.2',
31 'binaryornot>=0.2.0',
32 'jinja2>=2.7',
33 'click>=5.0',
34 'whichcraft>=0.1.1'
35 ]
36
37 long_description = readme + '\n\n' + history
38
39 if sys.argv[-1] == 'readme':
40 print(long_description)
41 sys.exit()
42
43
44 setup(
45 name='cookiecutter',
46 version=version,
47 description=('A command-line utility that creates projects from project '
48 'templates, e.g. creating a Python package project from a '
49 'Python package project template.'),
50 long_description=long_description,
51 author='Audrey Roy',
52 author_email='[email protected]',
53 url='https://github.com/audreyr/cookiecutter',
54 packages=[
55 'cookiecutter',
56 ],
57 package_dir={'cookiecutter': 'cookiecutter'},
58 entry_points={
59 'console_scripts': [
60 'cookiecutter = cookiecutter.cli:main',
61 ]
62 },
63 include_package_data=True,
64 install_requires=requirements,
65 extras_require={
66 ':sys_platform=="win32" and python_version=="2.7"': [
67 'PyYAML>=3.10'
68 ],
69 ':sys_platform!="win32" or python_version!="2.7"': [
70 'ruamel.yaml>=0.10.12'
71 ]
72 },
73 license='BSD',
74 zip_safe=False,
75 classifiers=[
76 'Development Status :: 5 - Production/Stable',
77 'Environment :: Console',
78 'Intended Audience :: Developers',
79 'Natural Language :: English',
80 'License :: OSI Approved :: BSD License',
81 'Programming Language :: Python',
82 'Programming Language :: Python :: 2',
83 'Programming Language :: Python :: 2.7',
84 'Programming Language :: Python :: 3',
85 'Programming Language :: Python :: 3.3',
86 'Programming Language :: Python :: 3.4',
87 'Programming Language :: Python :: 3.5',
88 'Programming Language :: Python :: Implementation :: CPython',
89 'Programming Language :: Python :: Implementation :: PyPy',
90 'Topic :: Software Development',
91 ],
92 keywords=(
93 'cookiecutter, Python, projects, project templates, Jinja2, '
94 'skeleton, scaffolding, project directory, setup.py, package, '
95 'packaging'
96 ),
97 )
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -3,10 +3,7 @@
import os
import sys
-try:
- from setuptools import setup
-except ImportError:
- from distutils.core import setup
+from setuptools import setup
version = "1.3.0"
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -3,10 +3,7 @@\n import os\n import sys\n \n-try:\n- from setuptools import setup\n-except ImportError:\n- from distutils.core import setup\n+from setuptools import setup\n \n version = \"1.3.0\"\n", "issue": "Drop distutils support\nI feel it's about time to drop **distutils** support as we rely on [extras_require of setuptools](https://pythonhosted.org/setuptools/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies) to handle **yaml** dependencies..\n\nWhat's your opinion on this matter?\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport sys\n\ntry:\n from setuptools import setup\nexcept ImportError:\n from distutils.core import setup\n\nversion = \"1.3.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version %s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = history_file.read().replace('.. :changelog:', '')\n\nrequirements = [\n 'future>=0.15.2',\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'click>=5.0',\n 'whichcraft>=0.1.1'\n]\n\nlong_description = readme + '\\n\\n' + history\n\nif sys.argv[-1] == 'readme':\n print(long_description)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'),\n long_description=long_description,\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/audreyr/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.cli:main',\n ]\n },\n include_package_data=True,\n install_requires=requirements,\n extras_require={\n ':sys_platform==\"win32\" and python_version==\"2.7\"': [\n 'PyYAML>=3.10'\n ],\n ':sys_platform!=\"win32\" or python_version!=\"2.7\"': [\n 'ruamel.yaml>=0.10.12'\n ]\n },\n license='BSD',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development',\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\nimport os\nimport sys\n\nfrom setuptools import setup\n\nversion = \"1.3.0\"\n\nif sys.argv[-1] == 'publish':\n os.system('python setup.py sdist upload')\n os.system('python setup.py bdist_wheel upload')\n sys.exit()\n\nif sys.argv[-1] == 'tag':\n os.system(\"git tag -a %s -m 'version 
%s'\" % (version, version))\n os.system(\"git push --tags\")\n sys.exit()\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = history_file.read().replace('.. :changelog:', '')\n\nrequirements = [\n 'future>=0.15.2',\n 'binaryornot>=0.2.0',\n 'jinja2>=2.7',\n 'click>=5.0',\n 'whichcraft>=0.1.1'\n]\n\nlong_description = readme + '\\n\\n' + history\n\nif sys.argv[-1] == 'readme':\n print(long_description)\n sys.exit()\n\n\nsetup(\n name='cookiecutter',\n version=version,\n description=('A command-line utility that creates projects from project '\n 'templates, e.g. creating a Python package project from a '\n 'Python package project template.'),\n long_description=long_description,\n author='Audrey Roy',\n author_email='[email protected]',\n url='https://github.com/audreyr/cookiecutter',\n packages=[\n 'cookiecutter',\n ],\n package_dir={'cookiecutter': 'cookiecutter'},\n entry_points={\n 'console_scripts': [\n 'cookiecutter = cookiecutter.cli:main',\n ]\n },\n include_package_data=True,\n install_requires=requirements,\n extras_require={\n ':sys_platform==\"win32\" and python_version==\"2.7\"': [\n 'PyYAML>=3.10'\n ],\n ':sys_platform!=\"win32\" or python_version!=\"2.7\"': [\n 'ruamel.yaml>=0.10.12'\n ]\n },\n license='BSD',\n zip_safe=False,\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Environment :: Console',\n 'Intended Audience :: Developers',\n 'Natural Language :: English',\n 'License :: OSI Approved :: BSD License',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: Implementation :: CPython',\n 'Programming Language :: Python :: Implementation :: PyPy',\n 'Topic :: Software Development',\n ],\n keywords=(\n 'cookiecutter, Python, projects, project templates, Jinja2, '\n 'skeleton, scaffolding, project directory, setup.py, package, '\n 'packaging'\n ),\n)\n", "path": "setup.py"}]} | 1,217 | 75 |
gh_patches_debug_30753 | rasdani/github-patches | git_diff | web2py__web2py-2194 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Stored XSS / Frame injection
In (probably) all versions, including the latest 2.18.5, it is possible to find one example file which is vulnerable to XSS (reflected, stored) and frame injection. (I do not want to publicly disclose the attack vector and the specific file because it is still possible to exploit.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gluon/serializers.py`
Content:
```
1 """
2 This file is part of the web2py Web Framework
3 Copyrighted by Massimo Di Pierro <[email protected]>
4 License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
5 """
6 import datetime
7 import decimal
8 from gluon.storage import Storage
9 from gluon.html import TAG, XmlComponent, xmlescape
10 from gluon.languages import lazyT
11 import gluon.contrib.rss2 as rss2
12 import json as json_parser
13 from gluon._compat import long, to_native, unicodeT, integer_types
14
15 have_yaml = True
16 try:
17 import yaml as yamlib
18 except ImportError:
19 have_yaml = False
20
21
22 def cast_keys(o, cast=str, encoding="utf-8"):
23 """
24 Builds a new object with <cast> type keys.
25 Use this function if you are in Python < 2.6.5
26 This avoids syntax errors when unpacking dictionary arguments.
27
28 Args:
29 o: is the object input
30 cast: (defaults to str) is an object type or function
31 which supports conversion such as:
32
33 converted = cast(o)
34
35 encoding: (defaults to utf-8) is the encoding for unicode
36 keys. This is not used for custom cast functions
37
38 """
39
40 if isinstance(o, (dict, Storage)):
41 if isinstance(o, dict):
42 newobj = dict()
43 else:
44 newobj = Storage()
45 for k, v in o.items():
46 if (cast == str) and isinstance(k, unicodeT):
47 key = k.encode(encoding)
48 else:
49 key = cast(k)
50 newobj[key] = cast_keys(v, cast=cast, encoding=encoding)
51 elif isinstance(o, (tuple, set, list)):
52 newobj = []
53 for item in o:
54 newobj.append(cast_keys(item, cast=cast, encoding=encoding))
55 if isinstance(o, tuple):
56 newobj = tuple(newobj)
57 elif isinstance(o, set):
58 newobj = set(newobj)
59 else:
60 # no string cast (unknown object)
61 newobj = o
62 return newobj
63
64
65 def loads_json(o, unicode_keys=True, **kwargs):
66 # deserialize a json string
67 result = json_parser.loads(o, **kwargs)
68 if not unicode_keys:
69 # filter non-str keys in dictionary objects
70 result = cast_keys(result,
71 encoding=kwargs.get("encoding", "utf-8"))
72 return result
73
74
75 def custom_json(o):
76 if hasattr(o, 'custom_json') and callable(o.custom_json):
77 return o.custom_json()
78 if isinstance(o, (datetime.date,
79 datetime.datetime,
80 datetime.time)):
81 return o.isoformat()[:19].replace('T', ' ')
82 elif isinstance(o, integer_types):
83 return int(o)
84 elif isinstance(o, decimal.Decimal):
85 return str(o)
86 elif isinstance(o, (bytes, bytearray)):
87 return str(o)
88 elif isinstance(o, lazyT):
89 return str(o)
90 elif isinstance(o, XmlComponent):
91 return to_native(o.xml())
92 elif isinstance(o, set):
93 return list(o)
94 elif hasattr(o, 'as_list') and callable(o.as_list):
95 return o.as_list()
96 elif hasattr(o, 'as_dict') and callable(o.as_dict):
97 return o.as_dict()
98 else:
99 raise TypeError(repr(o) + " is not JSON serializable")
100
101
102 def xml_rec(value, key, quote=True):
103 if hasattr(value, 'custom_xml') and callable(value.custom_xml):
104 return value.custom_xml()
105 elif isinstance(value, (dict, Storage)):
106 return TAG[key](*[TAG[k](xml_rec(v, '', quote))
107 for k, v in value.items()])
108 elif isinstance(value, list):
109 return TAG[key](*[TAG.item(xml_rec(item, '', quote)) for item in value])
110 elif hasattr(value, 'as_list') and callable(value.as_list):
111 return str(xml_rec(value.as_list(), '', quote))
112 elif hasattr(value, 'as_dict') and callable(value.as_dict):
113 return str(xml_rec(value.as_dict(), '', quote))
114 else:
115 return xmlescape(value, quote)
116
117
118 def xml(value, encoding='UTF-8', key='document', quote=True):
119 return ('<?xml version="1.0" encoding="%s"?>' % encoding) + str(xml_rec(value, key, quote))
120
121
122 def json(value, default=custom_json, indent=None, sort_keys=False):
123 value = json_parser.dumps(value, default=default, sort_keys=sort_keys, indent=indent)
124 # replace JavaScript incompatible spacing
125 # http://timelessrepo.com/json-isnt-a-javascript-subset
126 # PY3 FIXME
127 # return value.replace(ur'\u2028', '\\u2028').replace(ur'\2029', '\\u2029')
128 return value
129
130 def csv(value):
131 return ''
132
133
134 def ics(events, title=None, link=None, timeshift=0, calname=True,
135 **ignored):
136 title = title or '(unknown)'
137 if link and not callable(link):
138 link = lambda item, prefix=link: prefix.replace(
139 '[id]', str(item['id']))
140 s = 'BEGIN:VCALENDAR'
141 s += '\nVERSION:2.0'
142 if not calname is False:
143 s += '\nX-WR-CALNAME:%s' % (calname or title)
144 s += '\nSUMMARY:%s' % title
145 s += '\nPRODID:Generated by web2py'
146 s += '\nCALSCALE:GREGORIAN'
147 s += '\nMETHOD:PUBLISH'
148 for item in events:
149 s += '\nBEGIN:VEVENT'
150 s += '\nUID:%s' % item['id']
151 if link:
152 s += '\nURL:%s' % link(item)
153 shift = datetime.timedelta(seconds=3600 * timeshift)
154 start = item['start_datetime'] + shift
155 stop = item['stop_datetime'] + shift
156 s += '\nDTSTART:%s' % start.strftime('%Y%m%dT%H%M%S')
157 s += '\nDTEND:%s' % stop.strftime('%Y%m%dT%H%M%S')
158 s += '\nSUMMARY:%s' % item['title']
159 s += '\nEND:VEVENT'
160 s += '\nEND:VCALENDAR'
161 return s
162
163 def safe_encode(text):
164 if not isinstance(text, (str, unicodeT)):
165 text = str(text)
166 try:
167 text = text.encode('utf8','replace')
168 except ValueError:
169 new_text = ''
170 for c in text:
171 try:
172 new_text += c.encode('utf8')
173 except:
174 new_text += '?'
175 text = new_text
176 return text
177
178 def rss(feed):
179 if not 'entries' in feed and 'items' in feed:
180 feed['entries'] = feed['items']
181
182 def safestr(obj, key, default=''):
183 return safe_encode(obj.get(key,''))
184
185 now = datetime.datetime.now()
186 rss = rss2.RSS2(title=safestr(feed,'title'),
187 link=safestr(feed,'link'),
188 description=safestr(feed,'description'),
189 lastBuildDate=feed.get('created_on', now),
190 items=[rss2.RSSItem(
191 title=safestr(entry,'title','(notitle)'),
192 link=safestr(entry,'link'),
193 description=safestr(entry,'description'),
194 pubDate=entry.get('created_on', now)
195 ) for entry in feed.get('entries', [])])
196 return rss.to_xml(encoding='utf8')
197
198
199 def yaml(data):
200 if have_yaml:
201 return yamlib.dump(data)
202 else:
203 raise ImportError("No YAML serializer available")
204
205
206 def loads_yaml(data):
207 if have_yaml:
208 return yamlib.load(data)
209 else:
210 raise ImportError("No YAML serializer available")
211
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gluon/serializers.py b/gluon/serializers.py
--- a/gluon/serializers.py
+++ b/gluon/serializers.py
@@ -119,13 +119,43 @@
return ('<?xml version="1.0" encoding="%s"?>' % encoding) + str(xml_rec(value, key, quote))
-def json(value, default=custom_json, indent=None, sort_keys=False):
- value = json_parser.dumps(value, default=default, sort_keys=sort_keys, indent=indent)
- # replace JavaScript incompatible spacing
- # http://timelessrepo.com/json-isnt-a-javascript-subset
- # PY3 FIXME
- # return value.replace(ur'\u2028', '\\u2028').replace(ur'\2029', '\\u2029')
- return value
+class JSONEncoderForHTML(json_parser.JSONEncoder):
+ """An encoder that produces JSON safe to embed in HTML.
+ To embed JSON content in, say, a script tag on a web page, the
+ characters &, < and > should be escaped. They cannot be escaped
+ with the usual entities (e.g. &amp;) because they are not expanded
+ within <script> tags.
+ This class also escapes the line separator and paragraph separator
+ characters U+2028 and U+2029, irrespective of the ensure_ascii setting,
+ as these characters are not valid in JavaScript strings (see
+ http://timelessrepo.com/json-isnt-a-javascript-subset).
+ """
+
+ def encode(self, o):
+ # Override JSONEncoder.encode because it has hacks for
+ # performance that make things more complicated.
+ chunks = self.iterencode(o, True)
+ if self.ensure_ascii:
+ return ''.join(chunks)
+ else:
+ return u''.join(chunks)
+
+ def iterencode(self, o, _one_shot=False):
+ chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)
+ for chunk in chunks:
+ chunk = chunk.replace('&', '\\u0026')
+ chunk = chunk.replace('<', '\\u003c')
+ chunk = chunk.replace('>', '\\u003e')
+
+ if not self.ensure_ascii:
+ chunk = chunk.replace(u'\u2028', '\\u2028')
+ chunk = chunk.replace(u'\u2029', '\\u2029')
+
+ yield chunk
+
+
+def json(value, default=custom_json, indent=None, sort_keys=False, cls=JSONEncoderForHTML):
+ return json_parser.dumps(value, default=default, cls=cls, sort_keys=sort_keys, indent=indent)
def csv(value):
return ''
| {"golden_diff": "diff --git a/gluon/serializers.py b/gluon/serializers.py\n--- a/gluon/serializers.py\n+++ b/gluon/serializers.py\n@@ -119,13 +119,43 @@\n return ('<?xml version=\"1.0\" encoding=\"%s\"?>' % encoding) + str(xml_rec(value, key, quote))\n \n \n-def json(value, default=custom_json, indent=None, sort_keys=False):\n- value = json_parser.dumps(value, default=default, sort_keys=sort_keys, indent=indent)\n- # replace JavaScript incompatible spacing\n- # http://timelessrepo.com/json-isnt-a-javascript-subset\n- # PY3 FIXME\n- # return value.replace(ur'\\u2028', '\\\\u2028').replace(ur'\\2029', '\\\\u2029')\n- return value\n+class JSONEncoderForHTML(json_parser.JSONEncoder):\n+ \"\"\"An encoder that produces JSON safe to embed in HTML.\n+ To embed JSON content in, say, a script tag on a web page, the\n+ characters &, < and > should be escaped. They cannot be escaped\n+ with the usual entities (e.g. &) because they are not expanded\n+ within <script> tags.\n+ This class also escapes the line separator and paragraph separator\n+ characters U+2028 and U+2029, irrespective of the ensure_ascii setting,\n+ as these characters are not valid in JavaScript strings (see\n+ http://timelessrepo.com/json-isnt-a-javascript-subset).\n+ \"\"\"\n+\n+ def encode(self, o):\n+ # Override JSONEncoder.encode because it has hacks for\n+ # performance that make things more complicated.\n+ chunks = self.iterencode(o, True)\n+ if self.ensure_ascii:\n+ return ''.join(chunks)\n+ else:\n+ return u''.join(chunks)\n+\n+ def iterencode(self, o, _one_shot=False):\n+ chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)\n+ for chunk in chunks:\n+ chunk = chunk.replace('&', '\\\\u0026')\n+ chunk = chunk.replace('<', '\\\\u003c')\n+ chunk = chunk.replace('>', '\\\\u003e')\n+\n+ if not self.ensure_ascii:\n+ chunk = chunk.replace(u'\\u2028', '\\\\u2028')\n+ chunk = chunk.replace(u'\\u2029', '\\\\u2029')\n+\n+ yield chunk\n+\n+\n+def json(value, default=custom_json, indent=None, sort_keys=False, cls=JSONEncoderForHTML):\n+ return json_parser.dumps(value, default=default, cls=cls, sort_keys=sort_keys, indent=indent)\n \n def csv(value):\n return ''\n", "issue": "Stored XSS / Frame injection\nIn (probably) all versions including latest 2.18.5 is it possible to find one example file which is vulnerable to XSS(reflected, stored) and frame injection. (I do not want to publicly disclose attack vector and specific file because is it still possible to exploit)\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nThis file is part of the web2py Web Framework\nCopyrighted by Massimo Di Pierro <[email protected]>\nLicense: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n\"\"\"\nimport datetime\nimport decimal\nfrom gluon.storage import Storage\nfrom gluon.html import TAG, XmlComponent, xmlescape\nfrom gluon.languages import lazyT\nimport gluon.contrib.rss2 as rss2\nimport json as json_parser\nfrom gluon._compat import long, to_native, unicodeT, integer_types\n\nhave_yaml = True\ntry:\n import yaml as yamlib\nexcept ImportError:\n have_yaml = False\n\n\ndef cast_keys(o, cast=str, encoding=\"utf-8\"):\n \"\"\"\n Builds a new object with <cast> type keys.\n Use this function if you are in Python < 2.6.5\n This avoids syntax errors when unpacking dictionary arguments.\n\n Args:\n o: is the object input\n cast: (defaults to str) is an object type or function\n which supports conversion such as:\n\n converted = cast(o)\n\n encoding: (defaults to utf-8) is the encoding for unicode\n keys. 
This is not used for custom cast functions\n\n \"\"\"\n\n if isinstance(o, (dict, Storage)):\n if isinstance(o, dict):\n newobj = dict()\n else:\n newobj = Storage()\n for k, v in o.items():\n if (cast == str) and isinstance(k, unicodeT):\n key = k.encode(encoding)\n else:\n key = cast(k)\n newobj[key] = cast_keys(v, cast=cast, encoding=encoding)\n elif isinstance(o, (tuple, set, list)):\n newobj = []\n for item in o:\n newobj.append(cast_keys(item, cast=cast, encoding=encoding))\n if isinstance(o, tuple):\n newobj = tuple(newobj)\n elif isinstance(o, set):\n newobj = set(newobj)\n else:\n # no string cast (unknown object)\n newobj = o\n return newobj\n\n\ndef loads_json(o, unicode_keys=True, **kwargs):\n # deserialize a json string\n result = json_parser.loads(o, **kwargs)\n if not unicode_keys:\n # filter non-str keys in dictionary objects\n result = cast_keys(result,\n encoding=kwargs.get(\"encoding\", \"utf-8\"))\n return result\n\n\ndef custom_json(o):\n if hasattr(o, 'custom_json') and callable(o.custom_json):\n return o.custom_json()\n if isinstance(o, (datetime.date,\n datetime.datetime,\n datetime.time)):\n return o.isoformat()[:19].replace('T', ' ')\n elif isinstance(o, integer_types):\n return int(o)\n elif isinstance(o, decimal.Decimal):\n return str(o)\n elif isinstance(o, (bytes, bytearray)):\n return str(o)\n elif isinstance(o, lazyT):\n return str(o)\n elif isinstance(o, XmlComponent):\n return to_native(o.xml())\n elif isinstance(o, set):\n return list(o)\n elif hasattr(o, 'as_list') and callable(o.as_list):\n return o.as_list()\n elif hasattr(o, 'as_dict') and callable(o.as_dict):\n return o.as_dict()\n else:\n raise TypeError(repr(o) + \" is not JSON serializable\")\n\n\ndef xml_rec(value, key, quote=True):\n if hasattr(value, 'custom_xml') and callable(value.custom_xml):\n return value.custom_xml()\n elif isinstance(value, (dict, Storage)):\n return TAG[key](*[TAG[k](xml_rec(v, '', quote))\n for k, v in value.items()])\n elif isinstance(value, list):\n return TAG[key](*[TAG.item(xml_rec(item, '', quote)) for item in value])\n elif hasattr(value, 'as_list') and callable(value.as_list):\n return str(xml_rec(value.as_list(), '', quote))\n elif hasattr(value, 'as_dict') and callable(value.as_dict):\n return str(xml_rec(value.as_dict(), '', quote))\n else:\n return xmlescape(value, quote)\n\n\ndef xml(value, encoding='UTF-8', key='document', quote=True):\n return ('<?xml version=\"1.0\" encoding=\"%s\"?>' % encoding) + str(xml_rec(value, key, quote))\n\n\ndef json(value, default=custom_json, indent=None, sort_keys=False):\n value = json_parser.dumps(value, default=default, sort_keys=sort_keys, indent=indent)\n # replace JavaScript incompatible spacing\n # http://timelessrepo.com/json-isnt-a-javascript-subset\n # PY3 FIXME\n # return value.replace(ur'\\u2028', '\\\\u2028').replace(ur'\\2029', '\\\\u2029')\n return value\n\ndef csv(value):\n return ''\n\n\ndef ics(events, title=None, link=None, timeshift=0, calname=True,\n **ignored):\n title = title or '(unknown)'\n if link and not callable(link):\n link = lambda item, prefix=link: prefix.replace(\n '[id]', str(item['id']))\n s = 'BEGIN:VCALENDAR'\n s += '\\nVERSION:2.0'\n if not calname is False:\n s += '\\nX-WR-CALNAME:%s' % (calname or title)\n s += '\\nSUMMARY:%s' % title\n s += '\\nPRODID:Generated by web2py'\n s += '\\nCALSCALE:GREGORIAN'\n s += '\\nMETHOD:PUBLISH'\n for item in events:\n s += '\\nBEGIN:VEVENT'\n s += '\\nUID:%s' % item['id']\n if link:\n s += '\\nURL:%s' % link(item)\n shift = 
datetime.timedelta(seconds=3600 * timeshift)\n start = item['start_datetime'] + shift\n stop = item['stop_datetime'] + shift\n s += '\\nDTSTART:%s' % start.strftime('%Y%m%dT%H%M%S')\n s += '\\nDTEND:%s' % stop.strftime('%Y%m%dT%H%M%S')\n s += '\\nSUMMARY:%s' % item['title']\n s += '\\nEND:VEVENT'\n s += '\\nEND:VCALENDAR'\n return s\n\ndef safe_encode(text):\n if not isinstance(text, (str, unicodeT)):\n text = str(text)\n try:\n text = text.encode('utf8','replace')\n except ValueError:\n new_text = ''\n for c in text:\n try:\n new_text += c.encode('utf8')\n except:\n new_text += '?'\n text = new_text\n return text\n\ndef rss(feed):\n if not 'entries' in feed and 'items' in feed:\n feed['entries'] = feed['items']\n\n def safestr(obj, key, default=''):\n return safe_encode(obj.get(key,''))\n\n now = datetime.datetime.now()\n rss = rss2.RSS2(title=safestr(feed,'title'),\n link=safestr(feed,'link'),\n description=safestr(feed,'description'),\n lastBuildDate=feed.get('created_on', now),\n items=[rss2.RSSItem(\n title=safestr(entry,'title','(notitle)'),\n link=safestr(entry,'link'),\n description=safestr(entry,'description'),\n pubDate=entry.get('created_on', now)\n ) for entry in feed.get('entries', [])])\n return rss.to_xml(encoding='utf8')\n\n\ndef yaml(data):\n if have_yaml:\n return yamlib.dump(data)\n else:\n raise ImportError(\"No YAML serializer available\")\n\n\ndef loads_yaml(data):\n if have_yaml:\n return yamlib.load(data)\n else:\n raise ImportError(\"No YAML serializer available\")\n", "path": "gluon/serializers.py"}], "after_files": [{"content": "\"\"\"\nThis file is part of the web2py Web Framework\nCopyrighted by Massimo Di Pierro <[email protected]>\nLicense: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)\n\"\"\"\nimport datetime\nimport decimal\nfrom gluon.storage import Storage\nfrom gluon.html import TAG, XmlComponent, xmlescape\nfrom gluon.languages import lazyT\nimport gluon.contrib.rss2 as rss2\nimport json as json_parser\nfrom gluon._compat import long, to_native, unicodeT, integer_types\n\nhave_yaml = True\ntry:\n import yaml as yamlib\nexcept ImportError:\n have_yaml = False\n\n\ndef cast_keys(o, cast=str, encoding=\"utf-8\"):\n \"\"\"\n Builds a new object with <cast> type keys.\n Use this function if you are in Python < 2.6.5\n This avoids syntax errors when unpacking dictionary arguments.\n\n Args:\n o: is the object input\n cast: (defaults to str) is an object type or function\n which supports conversion such as:\n\n converted = cast(o)\n\n encoding: (defaults to utf-8) is the encoding for unicode\n keys. 
This is not used for custom cast functions\n\n \"\"\"\n\n if isinstance(o, (dict, Storage)):\n if isinstance(o, dict):\n newobj = dict()\n else:\n newobj = Storage()\n for k, v in o.items():\n if (cast == str) and isinstance(k, unicodeT):\n key = k.encode(encoding)\n else:\n key = cast(k)\n newobj[key] = cast_keys(v, cast=cast, encoding=encoding)\n elif isinstance(o, (tuple, set, list)):\n newobj = []\n for item in o:\n newobj.append(cast_keys(item, cast=cast, encoding=encoding))\n if isinstance(o, tuple):\n newobj = tuple(newobj)\n elif isinstance(o, set):\n newobj = set(newobj)\n else:\n # no string cast (unknown object)\n newobj = o\n return newobj\n\n\ndef loads_json(o, unicode_keys=True, **kwargs):\n # deserialize a json string\n result = json_parser.loads(o, **kwargs)\n if not unicode_keys:\n # filter non-str keys in dictionary objects\n result = cast_keys(result,\n encoding=kwargs.get(\"encoding\", \"utf-8\"))\n return result\n\n\ndef custom_json(o):\n if hasattr(o, 'custom_json') and callable(o.custom_json):\n return o.custom_json()\n if isinstance(o, (datetime.date,\n datetime.datetime,\n datetime.time)):\n return o.isoformat()[:19].replace('T', ' ')\n elif isinstance(o, integer_types):\n return int(o)\n elif isinstance(o, decimal.Decimal):\n return str(o)\n elif isinstance(o, (bytes, bytearray)):\n return str(o)\n elif isinstance(o, lazyT):\n return str(o)\n elif isinstance(o, XmlComponent):\n return to_native(o.xml())\n elif isinstance(o, set):\n return list(o)\n elif hasattr(o, 'as_list') and callable(o.as_list):\n return o.as_list()\n elif hasattr(o, 'as_dict') and callable(o.as_dict):\n return o.as_dict()\n else:\n raise TypeError(repr(o) + \" is not JSON serializable\")\n\n\ndef xml_rec(value, key, quote=True):\n if hasattr(value, 'custom_xml') and callable(value.custom_xml):\n return value.custom_xml()\n elif isinstance(value, (dict, Storage)):\n return TAG[key](*[TAG[k](xml_rec(v, '', quote))\n for k, v in value.items()])\n elif isinstance(value, list):\n return TAG[key](*[TAG.item(xml_rec(item, '', quote)) for item in value])\n elif hasattr(value, 'as_list') and callable(value.as_list):\n return str(xml_rec(value.as_list(), '', quote))\n elif hasattr(value, 'as_dict') and callable(value.as_dict):\n return str(xml_rec(value.as_dict(), '', quote))\n else:\n return xmlescape(value, quote)\n\n\ndef xml(value, encoding='UTF-8', key='document', quote=True):\n return ('<?xml version=\"1.0\" encoding=\"%s\"?>' % encoding) + str(xml_rec(value, key, quote))\n\n\nclass JSONEncoderForHTML(json_parser.JSONEncoder):\n \"\"\"An encoder that produces JSON safe to embed in HTML.\n To embed JSON content in, say, a script tag on a web page, the\n characters &, < and > should be escaped. They cannot be escaped\n with the usual entities (e.g. 
&) because they are not expanded\n within <script> tags.\n This class also escapes the line separator and paragraph separator\n characters U+2028 and U+2029, irrespective of the ensure_ascii setting,\n as these characters are not valid in JavaScript strings (see\n http://timelessrepo.com/json-isnt-a-javascript-subset).\n \"\"\"\n\n def encode(self, o):\n # Override JSONEncoder.encode because it has hacks for\n # performance that make things more complicated.\n chunks = self.iterencode(o, True)\n if self.ensure_ascii:\n return ''.join(chunks)\n else:\n return u''.join(chunks)\n\n def iterencode(self, o, _one_shot=False):\n chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)\n for chunk in chunks:\n chunk = chunk.replace('&', '\\\\u0026')\n chunk = chunk.replace('<', '\\\\u003c')\n chunk = chunk.replace('>', '\\\\u003e')\n\n if not self.ensure_ascii:\n chunk = chunk.replace(u'\\u2028', '\\\\u2028')\n chunk = chunk.replace(u'\\u2029', '\\\\u2029')\n\n yield chunk\n\n\ndef json(value, default=custom_json, indent=None, sort_keys=False, cls=JSONEncoderForHTML):\n return json_parser.dumps(value, default=default, cls=cls, sort_keys=sort_keys, indent=indent)\n\ndef csv(value):\n return ''\n\n\ndef ics(events, title=None, link=None, timeshift=0, calname=True,\n **ignored):\n title = title or '(unknown)'\n if link and not callable(link):\n link = lambda item, prefix=link: prefix.replace(\n '[id]', str(item['id']))\n s = 'BEGIN:VCALENDAR'\n s += '\\nVERSION:2.0'\n if not calname is False:\n s += '\\nX-WR-CALNAME:%s' % (calname or title)\n s += '\\nSUMMARY:%s' % title\n s += '\\nPRODID:Generated by web2py'\n s += '\\nCALSCALE:GREGORIAN'\n s += '\\nMETHOD:PUBLISH'\n for item in events:\n s += '\\nBEGIN:VEVENT'\n s += '\\nUID:%s' % item['id']\n if link:\n s += '\\nURL:%s' % link(item)\n shift = datetime.timedelta(seconds=3600 * timeshift)\n start = item['start_datetime'] + shift\n stop = item['stop_datetime'] + shift\n s += '\\nDTSTART:%s' % start.strftime('%Y%m%dT%H%M%S')\n s += '\\nDTEND:%s' % stop.strftime('%Y%m%dT%H%M%S')\n s += '\\nSUMMARY:%s' % item['title']\n s += '\\nEND:VEVENT'\n s += '\\nEND:VCALENDAR'\n return s\n\ndef safe_encode(text):\n if not isinstance(text, (str, unicodeT)):\n text = str(text)\n try:\n text = text.encode('utf8','replace')\n except ValueError:\n new_text = ''\n for c in text:\n try:\n new_text += c.encode('utf8')\n except:\n new_text += '?'\n text = new_text\n return text\n\ndef rss(feed):\n if not 'entries' in feed and 'items' in feed:\n feed['entries'] = feed['items']\n\n def safestr(obj, key, default=''):\n return safe_encode(obj.get(key,''))\n\n now = datetime.datetime.now()\n rss = rss2.RSS2(title=safestr(feed,'title'),\n link=safestr(feed,'link'),\n description=safestr(feed,'description'),\n lastBuildDate=feed.get('created_on', now),\n items=[rss2.RSSItem(\n title=safestr(entry,'title','(notitle)'),\n link=safestr(entry,'link'),\n description=safestr(entry,'description'),\n pubDate=entry.get('created_on', now)\n ) for entry in feed.get('entries', [])])\n return rss.to_xml(encoding='utf8')\n\n\ndef yaml(data):\n if have_yaml:\n return yamlib.dump(data)\n else:\n raise ImportError(\"No YAML serializer available\")\n\n\ndef loads_yaml(data):\n if have_yaml:\n return yamlib.load(data)\n else:\n raise ImportError(\"No YAML serializer available\")\n", "path": "gluon/serializers.py"}]} | 2,570 | 632 |
gh_patches_debug_32637 | rasdani/github-patches | git_diff | searxng__searxng-136 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
/preferences: use simple layout for the oscar theme
Suggestion: change how the options are displayed in the /preferences page in the oscar theme:
* General
* Default categories
* Search languages
* Autocomplete
* SafeSearch
* Open Access DOI rewrite (plugin)
* Open Access DOI resolver
* Engines
* User Interface
* Interface language
* Themes
* Results on new tabs
* Infinite scroll (plugin)
* Search on category select (plugin)
* Hotkeys (plugins)
* Privacy
* HTTP method
* Image proxy
* Tracker URL remover (plugin)
* Cookies
* Query syntax (new tab, a mix between documentation and auto documentation from the current settings)
* Documentation about the query syntax (bang, external bangs, ...)
* Answers (list of configured answers)
* Special engines: currency, translations (list the engines that ).
* External bangs
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/plugins/hash_plugin.py`
Content:
```
1 '''
2 searx is free software: you can redistribute it and/or modify
3 it under the terms of the GNU Affero General Public License as published by
4 the Free Software Foundation, either version 3 of the License, or
5 (at your option) any later version.
6
7 searx is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU Affero General Public License for more details.
11
12 You should have received a copy of the GNU Affero General Public License
13 along with searx. If not, see < http://www.gnu.org/licenses/ >.
14
15 (C) 2015 by Adam Tauber, <[email protected]>
16 (C) 2018, 2020 by Vaclav Zouzalik
17 '''
18
19 from flask_babel import gettext
20 import hashlib
21 import re
22
23 name = "Hash plugin"
24 description = gettext("Converts strings to different hash digests.")
25 default_on = True
26
27 parser_re = re.compile('(md5|sha1|sha224|sha256|sha384|sha512) (.*)', re.I)
28
29
30 def post_search(request, search):
31 # process only on first page
32 if search.search_query.pageno > 1:
33 return True
34 m = parser_re.match(search.search_query.query)
35 if not m:
36 # wrong query
37 return True
38
39 function, string = m.groups()
40 if string.strip().__len__() == 0:
41 # end if the string is empty
42 return True
43
44 # select hash function
45 f = hashlib.new(function.lower())
46
47 # make digest from the given string
48 f.update(string.encode('utf-8').strip())
49 answer = function + " " + gettext('hash digest') + ": " + f.hexdigest()
50
51 # print result
52 search.result_container.answers.clear()
53 search.result_container.answers['hash'] = {'answer': answer}
54 return True
55
```
Path: `searx/plugins/__init__.py`
Content:
```
1 '''
2 searx is free software: you can redistribute it and/or modify
3 it under the terms of the GNU Affero General Public License as published by
4 the Free Software Foundation, either version 3 of the License, or
5 (at your option) any later version.
6
7 searx is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU Affero General Public License for more details.
11
12 You should have received a copy of the GNU Affero General Public License
13 along with searx. If not, see < http://www.gnu.org/licenses/ >.
14
15 (C) 2015 by Adam Tauber, <[email protected]>
16 '''
17
18 from hashlib import sha256
19 from importlib import import_module
20 from os import listdir, makedirs, remove, stat, utime
21 from os.path import abspath, basename, dirname, exists, join
22 from shutil import copyfile
23
24 from searx import logger, settings
25
26
27 logger = logger.getChild('plugins')
28
29 from searx.plugins import (oa_doi_rewrite,
30 ahmia_filter,
31 hash_plugin,
32 infinite_scroll,
33 self_info,
34 search_on_category_select,
35 tracker_url_remover,
36 vim_hotkeys)
37
38 required_attrs = (('name', str),
39 ('description', str),
40 ('default_on', bool))
41
42 optional_attrs = (('js_dependencies', tuple),
43 ('css_dependencies', tuple))
44
45
46 class Plugin():
47 default_on = False
48 name = 'Default plugin'
49 description = 'Default plugin description'
50
51
52 class PluginStore():
53
54 def __init__(self):
55 self.plugins = []
56
57 def __iter__(self):
58 for plugin in self.plugins:
59 yield plugin
60
61 def register(self, *plugins, external=False):
62 if external:
63 plugins = load_external_plugins(plugins)
64 for plugin in plugins:
65 for plugin_attr, plugin_attr_type in required_attrs:
66 if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):
67 logger.critical('missing attribute "{0}", cannot load plugin: {1}'.format(plugin_attr, plugin))
68 exit(3)
69 for plugin_attr, plugin_attr_type in optional_attrs:
70 if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):
71 setattr(plugin, plugin_attr, plugin_attr_type())
72 plugin.id = plugin.name.replace(' ', '_')
73 self.plugins.append(plugin)
74
75 def call(self, ordered_plugin_list, plugin_type, request, *args, **kwargs):
76 ret = True
77 for plugin in ordered_plugin_list:
78 if hasattr(plugin, plugin_type):
79 ret = getattr(plugin, plugin_type)(request, *args, **kwargs)
80 if not ret:
81 break
82
83 return ret
84
85
86 def load_external_plugins(plugin_names):
87 plugins = []
88 for name in plugin_names:
89 logger.debug('loading plugin: {0}'.format(name))
90 try:
91 pkg = import_module(name)
92 except Exception as e:
93 logger.critical('failed to load plugin module {0}: {1}'.format(name, e))
94 exit(3)
95
96 pkg.__base_path = dirname(abspath(pkg.__file__))
97
98 prepare_package_resources(pkg, name)
99
100 plugins.append(pkg)
101 logger.debug('plugin "{0}" loaded'.format(name))
102 return plugins
103
104
105 def sync_resource(base_path, resource_path, name, target_dir, plugin_dir):
106 dep_path = join(base_path, resource_path)
107 file_name = basename(dep_path)
108 resource_path = join(target_dir, file_name)
109 if not exists(resource_path) or sha_sum(dep_path) != sha_sum(resource_path):
110 try:
111 copyfile(dep_path, resource_path)
112 # copy atime_ns and mtime_ns, so the weak ETags (generated by
113 # the HTTP server) do not change
114 dep_stat = stat(dep_path)
115 utime(resource_path, ns=(dep_stat.st_atime_ns, dep_stat.st_mtime_ns))
116 except:
117 logger.critical('failed to copy plugin resource {0} for plugin {1}'.format(file_name, name))
118 exit(3)
119
120 # returning with the web path of the resource
121 return join('plugins/external_plugins', plugin_dir, file_name)
122
123
124 def prepare_package_resources(pkg, name):
125 plugin_dir = 'plugin_' + name
126 target_dir = join(settings['ui']['static_path'], 'plugins/external_plugins', plugin_dir)
127 try:
128 makedirs(target_dir, exist_ok=True)
129 except:
130 logger.critical('failed to create resource directory {0} for plugin {1}'.format(target_dir, name))
131 exit(3)
132
133 resources = []
134
135 if hasattr(pkg, 'js_dependencies'):
136 resources.extend(map(basename, pkg.js_dependencies))
137 pkg.js_dependencies = tuple([
138 sync_resource(pkg.__base_path, x, name, target_dir, plugin_dir)
139 for x in pkg.js_dependencies
140 ])
141 if hasattr(pkg, 'css_dependencies'):
142 resources.extend(map(basename, pkg.css_dependencies))
143 pkg.css_dependencies = tuple([
144 sync_resource(pkg.__base_path, x, name, target_dir, plugin_dir)
145 for x in pkg.css_dependencies
146 ])
147
148 for f in listdir(target_dir):
149 if basename(f) not in resources:
150 resource_path = join(target_dir, basename(f))
151 try:
152 remove(resource_path)
153 except:
154 logger.critical('failed to remove unused resource file {0} for plugin {1}'.format(resource_path, name))
155 exit(3)
156
157
158 def sha_sum(filename):
159 with open(filename, "rb") as f:
160 file_content_bytes = f.read()
161 return sha256(file_content_bytes).hexdigest()
162
163
164 plugins = PluginStore()
165 plugins.register(oa_doi_rewrite)
166 plugins.register(hash_plugin)
167 plugins.register(infinite_scroll)
168 plugins.register(self_info)
169 plugins.register(search_on_category_select)
170 plugins.register(tracker_url_remover)
171 plugins.register(vim_hotkeys)
172 # load external plugins
173 if settings['plugins']:
174 plugins.register(*settings['plugins'], external=True)
175
176 if settings['enabled_plugins']:
177 for plugin in plugins:
178 if plugin.name in settings['enabled_plugins']:
179 plugin.default_on = True
180 else:
181 plugin.default_on = False
182
183 # load tor specific plugins
184 if settings['outgoing']['using_tor_proxy']:
185 plugins.register(ahmia_filter)
186
```
Path: `searx/plugins/self_info.py`
Content:
```
1 '''
2 searx is free software: you can redistribute it and/or modify
3 it under the terms of the GNU Affero General Public License as published by
4 the Free Software Foundation, either version 3 of the License, or
5 (at your option) any later version.
6
7 searx is distributed in the hope that it will be useful,
8 but WITHOUT ANY WARRANTY; without even the implied warranty of
9 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 GNU Affero General Public License for more details.
11
12 You should have received a copy of the GNU Affero General Public License
13 along with searx. If not, see < http://www.gnu.org/licenses/ >.
14
15 (C) 2015 by Adam Tauber, <[email protected]>
16 '''
17 from flask_babel import gettext
18 import re
19 name = gettext('Self Informations')
20 description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".')
21 default_on = True
22
23
24 # Self User Agent regex
25 p = re.compile('.*user[ -]agent.*', re.IGNORECASE)
26
27
28 # attach callback to the post search hook
29 # request: flask request object
30 # ctx: the whole local context of the pre search hook
31 def post_search(request, search):
32 if search.search_query.pageno > 1:
33 return True
34 if search.search_query.query == 'ip':
35 x_forwarded_for = request.headers.getlist("X-Forwarded-For")
36 if x_forwarded_for:
37 ip = x_forwarded_for[0]
38 else:
39 ip = request.remote_addr
40 search.result_container.answers['ip'] = {'answer': ip}
41 elif p.match(search.search_query.query):
42 ua = request.user_agent
43 search.result_container.answers['user-agent'] = {'answer': ua}
44 return True
45
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/searx/plugins/__init__.py b/searx/plugins/__init__.py
--- a/searx/plugins/__init__.py
+++ b/searx/plugins/__init__.py
@@ -70,6 +70,13 @@
if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):
setattr(plugin, plugin_attr, plugin_attr_type())
plugin.id = plugin.name.replace(' ', '_')
+ if not hasattr(plugin, 'preference_section'):
+ plugin.preference_section = 'general'
+ if plugin.preference_section == 'query':
+ for plugin_attr in ('query_keywords', 'query_examples'):
+ if not hasattr(plugin, plugin_attr):
+ logger.critical('missing attribute "{0}", cannot load plugin: {1}'.format(plugin_attr, plugin))
+ exit(3)
self.plugins.append(plugin)
def call(self, ordered_plugin_list, plugin_type, request, *args, **kwargs):
diff --git a/searx/plugins/hash_plugin.py b/searx/plugins/hash_plugin.py
--- a/searx/plugins/hash_plugin.py
+++ b/searx/plugins/hash_plugin.py
@@ -23,6 +23,9 @@
name = "Hash plugin"
description = gettext("Converts strings to different hash digests.")
default_on = True
+preference_section = 'query'
+query_keywords = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']
+query_examples = 'sha512 The quick brown fox jumps over the lazy dog'
parser_re = re.compile('(md5|sha1|sha224|sha256|sha384|sha512) (.*)', re.I)
diff --git a/searx/plugins/self_info.py b/searx/plugins/self_info.py
--- a/searx/plugins/self_info.py
+++ b/searx/plugins/self_info.py
@@ -19,7 +19,9 @@
name = gettext('Self Informations')
description = gettext('Displays your IP if the query is "ip" and your user agent if the query contains "user agent".')
default_on = True
-
+preference_section = 'query'
+query_keywords = ['user-agent']
+query_examples = ''
# Self User Agent regex
p = re.compile('.*user[ -]agent.*', re.IGNORECASE)
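For reference, a plugin that passes the new `preference_section == 'query'` checks added above only needs to expose the two extra attributes. The sketch below is a hypothetical plugin module, not one shipped with searx; its name, keyword and answer text are made up, while the `post_search` conventions follow the bundled plugins shown earlier (e.g. `self_info.py`).

```python
from flask_babel import gettext

name = gettext('Example query plugin')
description = gettext('Answers queries that start with the "example" keyword.')
default_on = False

# Extra attributes required by PluginStore.register() for query plugins,
# matching the checks introduced in the patch above.
preference_section = 'query'
query_keywords = ['example']
query_examples = 'example 42'


def post_search(request, search):
    # Only answer on the first results page, like the bundled plugins.
    if search.search_query.pageno > 1:
        return True
    query = search.search_query.query
    if query.startswith('example '):
        answer = 'you asked for: ' + query[len('example '):]
        search.result_container.answers['example'] = {'answer': answer}
    return True
```

Because the patched `register()` defaults `preference_section` to `'general'`, existing plugins that do not declare the attribute keep working unchanged.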
| {"golden_diff": "diff --git a/searx/plugins/__init__.py b/searx/plugins/__init__.py\n--- a/searx/plugins/__init__.py\n+++ b/searx/plugins/__init__.py\n@@ -70,6 +70,13 @@\n if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):\n setattr(plugin, plugin_attr, plugin_attr_type())\n plugin.id = plugin.name.replace(' ', '_')\n+ if not hasattr(plugin, 'preference_section'):\n+ plugin.preference_section = 'general'\n+ if plugin.preference_section == 'query':\n+ for plugin_attr in ('query_keywords', 'query_examples'):\n+ if not hasattr(plugin, plugin_attr):\n+ logger.critical('missing attribute \"{0}\", cannot load plugin: {1}'.format(plugin_attr, plugin))\n+ exit(3)\n self.plugins.append(plugin)\n \n def call(self, ordered_plugin_list, plugin_type, request, *args, **kwargs):\ndiff --git a/searx/plugins/hash_plugin.py b/searx/plugins/hash_plugin.py\n--- a/searx/plugins/hash_plugin.py\n+++ b/searx/plugins/hash_plugin.py\n@@ -23,6 +23,9 @@\n name = \"Hash plugin\"\n description = gettext(\"Converts strings to different hash digests.\")\n default_on = True\n+preference_section = 'query'\n+query_keywords = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']\n+query_examples = 'sha512 The quick brown fox jumps over the lazy dog'\n \n parser_re = re.compile('(md5|sha1|sha224|sha256|sha384|sha512) (.*)', re.I)\n \ndiff --git a/searx/plugins/self_info.py b/searx/plugins/self_info.py\n--- a/searx/plugins/self_info.py\n+++ b/searx/plugins/self_info.py\n@@ -19,7 +19,9 @@\n name = gettext('Self Informations')\n description = gettext('Displays your IP if the query is \"ip\" and your user agent if the query contains \"user agent\".')\n default_on = True\n-\n+preference_section = 'query'\n+query_keywords = ['user-agent']\n+query_examples = ''\n \n # Self User Agent regex\n p = re.compile('.*user[ -]agent.*', re.IGNORECASE)\n", "issue": "/preferences: use simple layout for the oscar theme\nSuggestion: change how the options are displayed in the /preferences page in the oscar theme :\r\n* General\r\n * Default categories\r\n * Search languages\r\n * Autocomplete\r\n * SafeSearch\r\n * Open Access DOI rewrite (plugin)\r\n * Open Access DOI resolver\r\n* Engines\r\n* User Interface\r\n * Interface language\r\n * Themes\r\n * Results on new tabs\r\n * Infinite scroll (plugin)\r\n * Search on category select (plugin)\r\n * Hotkeys (plugins)\r\n* Privacy\r\n * HTTP method\r\n * Image proxy\r\n * Tracker URL remover (plugin)\r\n* Cookies\r\n* Query syntax (new tab, a mix between documentation and auto documentation from the current settings)\r\n * Documentation about the query syntax (bang, external bangs, ...)\r\n * Answers (list of configured answers)\r\n * Special engines: currency, translations (list the engines that ).\r\n * External bangs\r\n\n/preferences: use simple layout for the oscar theme\nSuggestion: change how the options are displayed in the /preferences page in the oscar theme :\r\n* General\r\n * Default categories\r\n * Search languages\r\n * Autocomplete\r\n * SafeSearch\r\n * Open Access DOI rewrite (plugin)\r\n * Open Access DOI resolver\r\n* Engines\r\n* User Interface\r\n * Interface language\r\n * Themes\r\n * Results on new tabs\r\n * Infinite scroll (plugin)\r\n * Search on category select (plugin)\r\n * Hotkeys (plugins)\r\n* Privacy\r\n * HTTP method\r\n * Image proxy\r\n * Tracker URL remover (plugin)\r\n* Cookies\r\n* Query syntax (new tab, a mix between documentation and auto documentation from the current settings)\r\n * Documentation 
about the query syntax (bang, external bangs, ...)\r\n * Answers (list of configured answers)\r\n * Special engines: currency, translations (list the engines that ).\r\n * External bangs\r\n\n", "before_files": [{"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n(C) 2018, 2020 by Vaclav Zouzalik\n'''\n\nfrom flask_babel import gettext\nimport hashlib\nimport re\n\nname = \"Hash plugin\"\ndescription = gettext(\"Converts strings to different hash digests.\")\ndefault_on = True\n\nparser_re = re.compile('(md5|sha1|sha224|sha256|sha384|sha512) (.*)', re.I)\n\n\ndef post_search(request, search):\n # process only on first page\n if search.search_query.pageno > 1:\n return True\n m = parser_re.match(search.search_query.query)\n if not m:\n # wrong query\n return True\n\n function, string = m.groups()\n if string.strip().__len__() == 0:\n # end if the string is empty\n return True\n\n # select hash function\n f = hashlib.new(function.lower())\n\n # make digest from the given string\n f.update(string.encode('utf-8').strip())\n answer = function + \" \" + gettext('hash digest') + \": \" + f.hexdigest()\n\n # print result\n search.result_container.answers.clear()\n search.result_container.answers['hash'] = {'answer': answer}\n return True\n", "path": "searx/plugins/hash_plugin.py"}, {"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. 
If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n'''\n\nfrom hashlib import sha256\nfrom importlib import import_module\nfrom os import listdir, makedirs, remove, stat, utime\nfrom os.path import abspath, basename, dirname, exists, join\nfrom shutil import copyfile\n\nfrom searx import logger, settings\n\n\nlogger = logger.getChild('plugins')\n\nfrom searx.plugins import (oa_doi_rewrite,\n ahmia_filter,\n hash_plugin,\n infinite_scroll,\n self_info,\n search_on_category_select,\n tracker_url_remover,\n vim_hotkeys)\n\nrequired_attrs = (('name', str),\n ('description', str),\n ('default_on', bool))\n\noptional_attrs = (('js_dependencies', tuple),\n ('css_dependencies', tuple))\n\n\nclass Plugin():\n default_on = False\n name = 'Default plugin'\n description = 'Default plugin description'\n\n\nclass PluginStore():\n\n def __init__(self):\n self.plugins = []\n\n def __iter__(self):\n for plugin in self.plugins:\n yield plugin\n\n def register(self, *plugins, external=False):\n if external:\n plugins = load_external_plugins(plugins)\n for plugin in plugins:\n for plugin_attr, plugin_attr_type in required_attrs:\n if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):\n logger.critical('missing attribute \"{0}\", cannot load plugin: {1}'.format(plugin_attr, plugin))\n exit(3)\n for plugin_attr, plugin_attr_type in optional_attrs:\n if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):\n setattr(plugin, plugin_attr, plugin_attr_type())\n plugin.id = plugin.name.replace(' ', '_')\n self.plugins.append(plugin)\n\n def call(self, ordered_plugin_list, plugin_type, request, *args, **kwargs):\n ret = True\n for plugin in ordered_plugin_list:\n if hasattr(plugin, plugin_type):\n ret = getattr(plugin, plugin_type)(request, *args, **kwargs)\n if not ret:\n break\n\n return ret\n\n\ndef load_external_plugins(plugin_names):\n plugins = []\n for name in plugin_names:\n logger.debug('loading plugin: {0}'.format(name))\n try:\n pkg = import_module(name)\n except Exception as e:\n logger.critical('failed to load plugin module {0}: {1}'.format(name, e))\n exit(3)\n\n pkg.__base_path = dirname(abspath(pkg.__file__))\n\n prepare_package_resources(pkg, name)\n\n plugins.append(pkg)\n logger.debug('plugin \"{0}\" loaded'.format(name))\n return plugins\n\n\ndef sync_resource(base_path, resource_path, name, target_dir, plugin_dir):\n dep_path = join(base_path, resource_path)\n file_name = basename(dep_path)\n resource_path = join(target_dir, file_name)\n if not exists(resource_path) or sha_sum(dep_path) != sha_sum(resource_path):\n try:\n copyfile(dep_path, resource_path)\n # copy atime_ns and mtime_ns, so the weak ETags (generated by\n # the HTTP server) do not change\n dep_stat = stat(dep_path)\n utime(resource_path, ns=(dep_stat.st_atime_ns, dep_stat.st_mtime_ns))\n except:\n logger.critical('failed to copy plugin resource {0} for plugin {1}'.format(file_name, name))\n exit(3)\n\n # returning with the web path of the resource\n return join('plugins/external_plugins', plugin_dir, file_name)\n\n\ndef prepare_package_resources(pkg, name):\n plugin_dir = 'plugin_' + name\n target_dir = join(settings['ui']['static_path'], 'plugins/external_plugins', plugin_dir)\n try:\n makedirs(target_dir, exist_ok=True)\n except:\n logger.critical('failed to create resource directory {0} for plugin {1}'.format(target_dir, name))\n exit(3)\n\n resources = []\n\n if hasattr(pkg, 
'js_dependencies'):\n resources.extend(map(basename, pkg.js_dependencies))\n pkg.js_dependencies = tuple([\n sync_resource(pkg.__base_path, x, name, target_dir, plugin_dir)\n for x in pkg.js_dependencies\n ])\n if hasattr(pkg, 'css_dependencies'):\n resources.extend(map(basename, pkg.css_dependencies))\n pkg.css_dependencies = tuple([\n sync_resource(pkg.__base_path, x, name, target_dir, plugin_dir)\n for x in pkg.css_dependencies\n ])\n\n for f in listdir(target_dir):\n if basename(f) not in resources:\n resource_path = join(target_dir, basename(f))\n try:\n remove(resource_path)\n except:\n logger.critical('failed to remove unused resource file {0} for plugin {1}'.format(resource_path, name))\n exit(3)\n\n\ndef sha_sum(filename):\n with open(filename, \"rb\") as f:\n file_content_bytes = f.read()\n return sha256(file_content_bytes).hexdigest()\n\n\nplugins = PluginStore()\nplugins.register(oa_doi_rewrite)\nplugins.register(hash_plugin)\nplugins.register(infinite_scroll)\nplugins.register(self_info)\nplugins.register(search_on_category_select)\nplugins.register(tracker_url_remover)\nplugins.register(vim_hotkeys)\n# load external plugins\nif settings['plugins']:\n plugins.register(*settings['plugins'], external=True)\n\nif settings['enabled_plugins']:\n for plugin in plugins:\n if plugin.name in settings['enabled_plugins']:\n plugin.default_on = True\n else:\n plugin.default_on = False\n\n# load tor specific plugins\nif settings['outgoing']['using_tor_proxy']:\n plugins.register(ahmia_filter)\n", "path": "searx/plugins/__init__.py"}, {"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. 
If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n'''\nfrom flask_babel import gettext\nimport re\nname = gettext('Self Informations')\ndescription = gettext('Displays your IP if the query is \"ip\" and your user agent if the query contains \"user agent\".')\ndefault_on = True\n\n\n# Self User Agent regex\np = re.compile('.*user[ -]agent.*', re.IGNORECASE)\n\n\n# attach callback to the post search hook\n# request: flask request object\n# ctx: the whole local context of the pre search hook\ndef post_search(request, search):\n if search.search_query.pageno > 1:\n return True\n if search.search_query.query == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n ip = x_forwarded_for[0]\n else:\n ip = request.remote_addr\n search.result_container.answers['ip'] = {'answer': ip}\n elif p.match(search.search_query.query):\n ua = request.user_agent\n search.result_container.answers['user-agent'] = {'answer': ua}\n return True\n", "path": "searx/plugins/self_info.py"}], "after_files": [{"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n(C) 2018, 2020 by Vaclav Zouzalik\n'''\n\nfrom flask_babel import gettext\nimport hashlib\nimport re\n\nname = \"Hash plugin\"\ndescription = gettext(\"Converts strings to different hash digests.\")\ndefault_on = True\npreference_section = 'query'\nquery_keywords = ['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']\nquery_examples = 'sha512 The quick brown fox jumps over the lazy dog'\n\nparser_re = re.compile('(md5|sha1|sha224|sha256|sha384|sha512) (.*)', re.I)\n\n\ndef post_search(request, search):\n # process only on first page\n if search.search_query.pageno > 1:\n return True\n m = parser_re.match(search.search_query.query)\n if not m:\n # wrong query\n return True\n\n function, string = m.groups()\n if string.strip().__len__() == 0:\n # end if the string is empty\n return True\n\n # select hash function\n f = hashlib.new(function.lower())\n\n # make digest from the given string\n f.update(string.encode('utf-8').strip())\n answer = function + \" \" + gettext('hash digest') + \": \" + f.hexdigest()\n\n # print result\n search.result_container.answers.clear()\n search.result_container.answers['hash'] = {'answer': answer}\n return True\n", "path": "searx/plugins/hash_plugin.py"}, {"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n'''\n\nfrom hashlib import sha256\nfrom importlib import import_module\nfrom os import listdir, makedirs, remove, stat, utime\nfrom os.path import abspath, basename, dirname, exists, join\nfrom shutil import copyfile\n\nfrom searx import logger, settings\n\n\nlogger = logger.getChild('plugins')\n\nfrom searx.plugins import (oa_doi_rewrite,\n ahmia_filter,\n hash_plugin,\n infinite_scroll,\n self_info,\n search_on_category_select,\n tracker_url_remover,\n vim_hotkeys)\n\nrequired_attrs = (('name', str),\n ('description', str),\n ('default_on', bool))\n\noptional_attrs = (('js_dependencies', tuple),\n ('css_dependencies', tuple))\n\n\nclass Plugin():\n default_on = False\n name = 'Default plugin'\n description = 'Default plugin description'\n\n\nclass PluginStore():\n\n def __init__(self):\n self.plugins = []\n\n def __iter__(self):\n for plugin in self.plugins:\n yield plugin\n\n def register(self, *plugins, external=False):\n if external:\n plugins = load_external_plugins(plugins)\n for plugin in plugins:\n for plugin_attr, plugin_attr_type in required_attrs:\n if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):\n logger.critical('missing attribute \"{0}\", cannot load plugin: {1}'.format(plugin_attr, plugin))\n exit(3)\n for plugin_attr, plugin_attr_type in optional_attrs:\n if not hasattr(plugin, plugin_attr) or not isinstance(getattr(plugin, plugin_attr), plugin_attr_type):\n setattr(plugin, plugin_attr, plugin_attr_type())\n plugin.id = plugin.name.replace(' ', '_')\n if not hasattr(plugin, 'preference_section'):\n plugin.preference_section = 'general'\n if plugin.preference_section == 'query':\n for plugin_attr in ('query_keywords', 'query_examples'):\n if not hasattr(plugin, plugin_attr):\n logger.critical('missing attribute \"{0}\", cannot load plugin: {1}'.format(plugin_attr, plugin))\n exit(3)\n self.plugins.append(plugin)\n\n def call(self, ordered_plugin_list, plugin_type, request, *args, **kwargs):\n ret = True\n for plugin in ordered_plugin_list:\n if hasattr(plugin, plugin_type):\n ret = getattr(plugin, plugin_type)(request, *args, **kwargs)\n if not ret:\n break\n\n return ret\n\n\ndef load_external_plugins(plugin_names):\n plugins = []\n for name in plugin_names:\n logger.debug('loading plugin: {0}'.format(name))\n try:\n pkg = import_module(name)\n except Exception as e:\n logger.critical('failed to load plugin module {0}: {1}'.format(name, e))\n exit(3)\n\n pkg.__base_path = dirname(abspath(pkg.__file__))\n\n prepare_package_resources(pkg, name)\n\n plugins.append(pkg)\n logger.debug('plugin \"{0}\" loaded'.format(name))\n return plugins\n\n\ndef sync_resource(base_path, resource_path, name, target_dir, plugin_dir):\n dep_path = join(base_path, resource_path)\n file_name = basename(dep_path)\n resource_path = join(target_dir, file_name)\n if not exists(resource_path) or sha_sum(dep_path) != sha_sum(resource_path):\n try:\n copyfile(dep_path, resource_path)\n # copy atime_ns and mtime_ns, so the weak ETags (generated by\n # the HTTP server) do not change\n dep_stat = stat(dep_path)\n utime(resource_path, ns=(dep_stat.st_atime_ns, dep_stat.st_mtime_ns))\n except:\n logger.critical('failed to copy plugin resource {0} for plugin {1}'.format(file_name, name))\n exit(3)\n\n # 
returning with the web path of the resource\n return join('plugins/external_plugins', plugin_dir, file_name)\n\n\ndef prepare_package_resources(pkg, name):\n plugin_dir = 'plugin_' + name\n target_dir = join(settings['ui']['static_path'], 'plugins/external_plugins', plugin_dir)\n try:\n makedirs(target_dir, exist_ok=True)\n except:\n logger.critical('failed to create resource directory {0} for plugin {1}'.format(target_dir, name))\n exit(3)\n\n resources = []\n\n if hasattr(pkg, 'js_dependencies'):\n resources.extend(map(basename, pkg.js_dependencies))\n pkg.js_dependencies = tuple([\n sync_resource(pkg.__base_path, x, name, target_dir, plugin_dir)\n for x in pkg.js_dependencies\n ])\n if hasattr(pkg, 'css_dependencies'):\n resources.extend(map(basename, pkg.css_dependencies))\n pkg.css_dependencies = tuple([\n sync_resource(pkg.__base_path, x, name, target_dir, plugin_dir)\n for x in pkg.css_dependencies\n ])\n\n for f in listdir(target_dir):\n if basename(f) not in resources:\n resource_path = join(target_dir, basename(f))\n try:\n remove(resource_path)\n except:\n logger.critical('failed to remove unused resource file {0} for plugin {1}'.format(resource_path, name))\n exit(3)\n\n\ndef sha_sum(filename):\n with open(filename, \"rb\") as f:\n file_content_bytes = f.read()\n return sha256(file_content_bytes).hexdigest()\n\n\nplugins = PluginStore()\nplugins.register(oa_doi_rewrite)\nplugins.register(hash_plugin)\nplugins.register(infinite_scroll)\nplugins.register(self_info)\nplugins.register(search_on_category_select)\nplugins.register(tracker_url_remover)\nplugins.register(vim_hotkeys)\n# load external plugins\nif settings['plugins']:\n plugins.register(*settings['plugins'], external=True)\n\nif settings['enabled_plugins']:\n for plugin in plugins:\n if plugin.name in settings['enabled_plugins']:\n plugin.default_on = True\n else:\n plugin.default_on = False\n\n# load tor specific plugins\nif settings['outgoing']['using_tor_proxy']:\n plugins.register(ahmia_filter)\n", "path": "searx/plugins/__init__.py"}, {"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. 
If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n'''\nfrom flask_babel import gettext\nimport re\nname = gettext('Self Informations')\ndescription = gettext('Displays your IP if the query is \"ip\" and your user agent if the query contains \"user agent\".')\ndefault_on = True\npreference_section = 'query'\nquery_keywords = ['user-agent']\nquery_examples = ''\n\n# Self User Agent regex\np = re.compile('.*user[ -]agent.*', re.IGNORECASE)\n\n\n# attach callback to the post search hook\n# request: flask request object\n# ctx: the whole local context of the pre search hook\ndef post_search(request, search):\n if search.search_query.pageno > 1:\n return True\n if search.search_query.query == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n ip = x_forwarded_for[0]\n else:\n ip = request.remote_addr\n search.result_container.answers['ip'] = {'answer': ip}\n elif p.match(search.search_query.query):\n ua = request.user_agent\n search.result_container.answers['user-agent'] = {'answer': ua}\n return True\n", "path": "searx/plugins/self_info.py"}]} | 3,562 | 531 |
gh_patches_debug_23437 | rasdani/github-patches | git_diff | ansible__ansible-modules-core-845 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
rhn_channel is using deprecated methods
Issue Type:
Bug Report
Ansible Version:
1.7.2
Environment:
OS X 10.9 / RHEL 6/7
Summary:
When adding a child channel to an RHN Satellite server, rhn_channel fails with a 500 error.
Steps To Reproduce:
Attempt to subscribe to a child channel with rhn_channel against a Satellite 5.4+ server.
Expected Results:
Server gets subscribed to child channel.
Actual Results:
xmlrpclib.ProtocolError: <ProtocolError for redhat.example.com/rpc/api: 500 Internal Server Error>
channel.software.setSystemChannels is deprecated.
https://access.redhat.com/documentation/en-US/Red_Hat_Network_Satellite/5.5/html/API_Overview/files/html/handlers/ChannelSoftwareHandler.html#setSystemChannels
PR to follow
--- END ISSUE ---
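For orientation before the code below: the Satellite API page linked in the issue names `system.setChildChannels` as the non-deprecated handler. A minimal sketch of the call, assuming a client/session pair obtained the same way the module shown below does it (the system id and channel label are placeholders taken from the module's EXAMPLES string):
```python
import xmlrpclib

# Sketch only -- connection details mirror the module's EXAMPLES string.
client = xmlrpclib.Server("https://redhat.example.com/rpc/api", verbose=0)
session = client.auth.login("rhnuser", "guessme")

sys_id = 1000010000                          # placeholder system id
channels = ["rhel-x86_64-server-v2vwin-6"]   # channel labels to set

# Deprecated handler that triggers the 500 on Satellite 5.4+:
#   client.channel.software.setSystemChannels(session, sys_id, channels)

# Replacement handler from the linked API documentation:
client.system.setChildChannels(session, sys_id, channels)

client.auth.logout(session)
```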
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packaging/os/rhn_channel.py`
Content:
```
1 #!/usr/bin/python
2
3 # (c) Vincent Van de Kussen
4 #
5 # This file is part of Ansible
6 #
7 # Ansible is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # Ansible is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
19
20 DOCUMENTATION = '''
21 ---
22 module: rhn_channel
23 short_description: Adds or removes Red Hat software channels
24 description:
25 - Adds or removes Red Hat software channels
26 version_added: "1.1"
27 author: Vincent Van der Kussen
28 notes:
29 - this module fetches the system id from RHN.
30 requirements:
31 - none
32 options:
33 name:
34 description:
35 - name of the software channel
36 required: true
37 default: null
38 sysname:
39 description:
40 - name of the system as it is known in RHN/Satellite
41 required: true
42 default: null
43 state:
44 description:
45 - whether the channel should be present or not
46 required: false
47 default: present
48 url:
49 description:
50 - The full url to the RHN/Satellite api
51 required: true
52 user:
53 description:
54 - RHN/Satellite user
55 required: true
56 password:
57 description:
58 - "the user's password"
59 required: true
60 '''
61
62 EXAMPLES = '''
63 - rhn_channel: name=rhel-x86_64-server-v2vwin-6 sysname=server01 url=https://rhn.redhat.com/rpc/api user=rhnuser password=guessme
64 '''
65
66 import xmlrpclib
67 from operator import itemgetter
68 import re
69
70
71 # ------------------------------------------------------- #
72
73 def get_systemid(client, session, sysname):
74 systems = client.system.listUserSystems(session)
75 for system in systems:
76 if system.get('name') == sysname:
77 idres = system.get('id')
78 idd = int(idres)
79 return idd
80
81 # ------------------------------------------------------- #
82
83 # unused:
84 #
85 #def get_localsystemid():
86 # f = open("/etc/sysconfig/rhn/systemid", "r")
87 # content = f.read()
88 # loc_id = re.search(r'\b(ID-)(\d{10})' ,content)
89 # return loc_id.group(2)
90
91 # ------------------------------------------------------- #
92
93 def subscribe_channels(channels, client, session, sysname, sys_id):
94 c = base_channels(client, session, sys_id)
95 c.append(channels)
96 return client.channel.software.setSystemChannels(session, sys_id, c)
97
98 # ------------------------------------------------------- #
99
100 def unsubscribe_channels(channels, client, session, sysname, sys_id):
101 c = base_channels(client, session, sys_id)
102 c.remove(channels)
103 return client.channel.software.setSystemChannels(session, sys_id, c)
104
105 # ------------------------------------------------------- #
106
107 def base_channels(client, session, sys_id):
108 basechan = client.channel.software.listSystemChannels(session, sys_id)
109 try:
110 chans = [item['label'] for item in basechan]
111 except KeyError:
112 chans = [item['channel_label'] for item in basechan]
113 return chans
114
115 # ------------------------------------------------------- #
116
117
118 def main():
119
120 module = AnsibleModule(
121 argument_spec = dict(
122 state = dict(default='present', choices=['present', 'absent']),
123 name = dict(required=True),
124 sysname = dict(required=True),
125 url = dict(required=True),
126 user = dict(required=True),
127 password = dict(required=True, aliases=['pwd']),
128 )
129 # supports_check_mode=True
130 )
131
132 state = module.params['state']
133 channelname = module.params['name']
134 systname = module.params['sysname']
135 saturl = module.params['url']
136 user = module.params['user']
137 password = module.params['password']
138
139 #initialize connection
140 client = xmlrpclib.Server(saturl, verbose=0)
141 session = client.auth.login(user, password)
142
143 # get systemid
144 sys_id = get_systemid(client, session, systname)
145
146 # get channels for system
147 chans = base_channels(client, session, sys_id)
148
149
150 if state == 'present':
151 if channelname in chans:
152 module.exit_json(changed=False, msg="Channel %s already exists" % channelname)
153 else:
154 subscribe_channels(channelname, client, session, systname, sys_id)
155 module.exit_json(changed=True, msg="Channel %s added" % channelname)
156
157 if state == 'absent':
158 if not channelname in chans:
159 module.exit_json(changed=False, msg="Not subscribed to channel %s." % channelname)
160 else:
161 unsubscribe_channels(channelname, client, session, systname, sys_id)
162 module.exit_json(changed=True, msg="Channel %s removed" % channelname)
163
164 client.auth.logout(session)
165
166
167 # import module snippets
168 from ansible.module_utils.basic import *
169 main()
170
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/packaging/os/rhn_channel.py b/packaging/os/rhn_channel.py
--- a/packaging/os/rhn_channel.py
+++ b/packaging/os/rhn_channel.py
@@ -90,17 +90,17 @@
# ------------------------------------------------------- #
-def subscribe_channels(channels, client, session, sysname, sys_id):
- c = base_channels(client, session, sys_id)
- c.append(channels)
- return client.channel.software.setSystemChannels(session, sys_id, c)
+def subscribe_channels(channelname, client, session, sysname, sys_id):
+ channels = base_channels(client, session, sys_id)
+ channels.append(channelname)
+ return client.system.setChildChannels(session, sys_id, channels)
# ------------------------------------------------------- #
-def unsubscribe_channels(channels, client, session, sysname, sys_id):
- c = base_channels(client, session, sys_id)
- c.remove(channels)
- return client.channel.software.setSystemChannels(session, sys_id, c)
+def unsubscribe_channels(channelname, client, session, sysname, sys_id):
+ channels = base_channels(client, session, sys_id)
+ channels.remove(channelname)
+ return client.system.setChildChannels(session, sys_id, channels)
# ------------------------------------------------------- #
@@ -167,3 +167,4 @@
# import module snippets
from ansible.module_utils.basic import *
main()
+
| {"golden_diff": "diff --git a/packaging/os/rhn_channel.py b/packaging/os/rhn_channel.py\n--- a/packaging/os/rhn_channel.py\n+++ b/packaging/os/rhn_channel.py\n@@ -90,17 +90,17 @@\n \n # ------------------------------------------------------- #\n \n-def subscribe_channels(channels, client, session, sysname, sys_id):\n- c = base_channels(client, session, sys_id)\n- c.append(channels)\n- return client.channel.software.setSystemChannels(session, sys_id, c)\n+def subscribe_channels(channelname, client, session, sysname, sys_id):\n+ channels = base_channels(client, session, sys_id)\n+ channels.append(channelname)\n+ return client.system.setChildChannels(session, sys_id, channels)\n \n # ------------------------------------------------------- #\n \n-def unsubscribe_channels(channels, client, session, sysname, sys_id):\n- c = base_channels(client, session, sys_id)\n- c.remove(channels)\n- return client.channel.software.setSystemChannels(session, sys_id, c)\n+def unsubscribe_channels(channelname, client, session, sysname, sys_id):\n+ channels = base_channels(client, session, sys_id)\n+ channels.remove(channelname)\n+ return client.system.setChildChannels(session, sys_id, channels)\n \n # ------------------------------------------------------- #\n \n@@ -167,3 +167,4 @@\n # import module snippets\n from ansible.module_utils.basic import *\n main()\n+\n", "issue": "rhn_channel is using depreciated methods\nIssue Type:\nBug Report\n\nAnsible Version: \n1.7.2\n\nEnvironment: \nOS X 10.9 / RHEL 6/7\n\nSummary:\nWhen adding a child channel to rhn sat server rhn_channel fails with 500 error. \n\nSteps To Reproduce:\nattempt to subscribe to a child channel with rhn_channel to sat server 5.4+\n\nExpected Results:\nServer gets subscribed to child channel.\n\nActual Results:\nxmlrpclib.ProtocolError: <ProtocolError for redhat.example.com/rpc/api: 500 Internal Server Error>\n\nchannel.software.setSystemChannels is depreciated. \nhttps://access.redhat.com/documentation/en-US/Red_Hat_Network_Satellite/5.5/html/API_Overview/files/html/handlers/ChannelSoftwareHandler.html#setSystemChannels\n\nPR to follow\n\n", "before_files": [{"content": "#!/usr/bin/python\n\n# (c) Vincent Van de Kussen\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: rhn_channel\nshort_description: Adds or removes Red Hat software channels\ndescription:\n - Adds or removes Red Hat software channels\nversion_added: \"1.1\"\nauthor: Vincent Van der Kussen\nnotes:\n - this module fetches the system id from RHN. 
\nrequirements:\n - none\noptions:\n name:\n description:\n - name of the software channel\n required: true\n default: null\n sysname:\n description:\n - name of the system as it is known in RHN/Satellite\n required: true\n default: null\n state:\n description:\n - whether the channel should be present or not\n required: false\n default: present\n url:\n description: \n - The full url to the RHN/Satellite api\n required: true\n user:\n description:\n - RHN/Satellite user\n required: true\n password:\n description:\n - \"the user's password\"\n required: true\n'''\n\nEXAMPLES = '''\n- rhn_channel: name=rhel-x86_64-server-v2vwin-6 sysname=server01 url=https://rhn.redhat.com/rpc/api user=rhnuser password=guessme\n'''\n\nimport xmlrpclib\nfrom operator import itemgetter\nimport re\n\n\n# ------------------------------------------------------- #\n\ndef get_systemid(client, session, sysname):\n systems = client.system.listUserSystems(session)\n for system in systems:\n if system.get('name') == sysname:\n idres = system.get('id')\n idd = int(idres)\n return idd\n\n# ------------------------------------------------------- #\n\n# unused:\n#\n#def get_localsystemid():\n# f = open(\"/etc/sysconfig/rhn/systemid\", \"r\")\n# content = f.read()\n# loc_id = re.search(r'\\b(ID-)(\\d{10})' ,content)\n# return loc_id.group(2)\n\n# ------------------------------------------------------- #\n\ndef subscribe_channels(channels, client, session, sysname, sys_id):\n c = base_channels(client, session, sys_id)\n c.append(channels)\n return client.channel.software.setSystemChannels(session, sys_id, c)\n\n# ------------------------------------------------------- #\n\ndef unsubscribe_channels(channels, client, session, sysname, sys_id):\n c = base_channels(client, session, sys_id)\n c.remove(channels)\n return client.channel.software.setSystemChannels(session, sys_id, c)\n\n# ------------------------------------------------------- #\n\ndef base_channels(client, session, sys_id):\n basechan = client.channel.software.listSystemChannels(session, sys_id)\n try:\n chans = [item['label'] for item in basechan]\n except KeyError:\n chans = [item['channel_label'] for item in basechan]\n return chans\n\n# ------------------------------------------------------- #\n\n\ndef main():\n\n module = AnsibleModule(\n argument_spec = dict(\n state = dict(default='present', choices=['present', 'absent']),\n name = dict(required=True),\n sysname = dict(required=True),\n url = dict(required=True),\n user = dict(required=True),\n password = dict(required=True, aliases=['pwd']),\n )\n# supports_check_mode=True\n )\n\n state = module.params['state']\n channelname = module.params['name']\n systname = module.params['sysname']\n saturl = module.params['url']\n user = module.params['user']\n password = module.params['password']\n \n #initialize connection\n client = xmlrpclib.Server(saturl, verbose=0)\n session = client.auth.login(user, password)\n \n # get systemid\n sys_id = get_systemid(client, session, systname)\n\n # get channels for system\n chans = base_channels(client, session, sys_id)\n \n \n if state == 'present':\n if channelname in chans:\n module.exit_json(changed=False, msg=\"Channel %s already exists\" % channelname)\n else:\n subscribe_channels(channelname, client, session, systname, sys_id)\n module.exit_json(changed=True, msg=\"Channel %s added\" % channelname)\n\n if state == 'absent':\n if not channelname in chans:\n module.exit_json(changed=False, msg=\"Not subscribed to channel %s.\" % channelname)\n else:\n 
unsubscribe_channels(channelname, client, session, systname, sys_id)\n module.exit_json(changed=True, msg=\"Channel %s removed\" % channelname)\n\n client.auth.logout(session)\n\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n", "path": "packaging/os/rhn_channel.py"}], "after_files": [{"content": "#!/usr/bin/python\n\n# (c) Vincent Van de Kussen\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: rhn_channel\nshort_description: Adds or removes Red Hat software channels\ndescription:\n - Adds or removes Red Hat software channels\nversion_added: \"1.1\"\nauthor: Vincent Van der Kussen\nnotes:\n - this module fetches the system id from RHN. \nrequirements:\n - none\noptions:\n name:\n description:\n - name of the software channel\n required: true\n default: null\n sysname:\n description:\n - name of the system as it is known in RHN/Satellite\n required: true\n default: null\n state:\n description:\n - whether the channel should be present or not\n required: false\n default: present\n url:\n description: \n - The full url to the RHN/Satellite api\n required: true\n user:\n description:\n - RHN/Satellite user\n required: true\n password:\n description:\n - \"the user's password\"\n required: true\n'''\n\nEXAMPLES = '''\n- rhn_channel: name=rhel-x86_64-server-v2vwin-6 sysname=server01 url=https://rhn.redhat.com/rpc/api user=rhnuser password=guessme\n'''\n\nimport xmlrpclib\nfrom operator import itemgetter\nimport re\n\n\n# ------------------------------------------------------- #\n\ndef get_systemid(client, session, sysname):\n systems = client.system.listUserSystems(session)\n for system in systems:\n if system.get('name') == sysname:\n idres = system.get('id')\n idd = int(idres)\n return idd\n\n# ------------------------------------------------------- #\n\n# unused:\n#\n#def get_localsystemid():\n# f = open(\"/etc/sysconfig/rhn/systemid\", \"r\")\n# content = f.read()\n# loc_id = re.search(r'\\b(ID-)(\\d{10})' ,content)\n# return loc_id.group(2)\n\n# ------------------------------------------------------- #\n\ndef subscribe_channels(channelname, client, session, sysname, sys_id):\n channels = base_channels(client, session, sys_id)\n channels.append(channelname)\n return client.system.setChildChannels(session, sys_id, channels)\n\n# ------------------------------------------------------- #\n\ndef unsubscribe_channels(channelname, client, session, sysname, sys_id):\n channels = base_channels(client, session, sys_id)\n channels.remove(channelname)\n return client.system.setChildChannels(session, sys_id, channels)\n\n# ------------------------------------------------------- #\n\ndef base_channels(client, session, sys_id):\n basechan = client.channel.software.listSystemChannels(session, sys_id)\n try:\n chans = [item['label'] for item in basechan]\n except KeyError:\n chans = [item['channel_label'] for item in basechan]\n return 
chans\n\n# ------------------------------------------------------- #\n\n\ndef main():\n\n module = AnsibleModule(\n argument_spec = dict(\n state = dict(default='present', choices=['present', 'absent']),\n name = dict(required=True),\n sysname = dict(required=True),\n url = dict(required=True),\n user = dict(required=True),\n password = dict(required=True, aliases=['pwd']),\n )\n# supports_check_mode=True\n )\n\n state = module.params['state']\n channelname = module.params['name']\n systname = module.params['sysname']\n saturl = module.params['url']\n user = module.params['user']\n password = module.params['password']\n \n #initialize connection\n client = xmlrpclib.Server(saturl, verbose=0)\n session = client.auth.login(user, password)\n \n # get systemid\n sys_id = get_systemid(client, session, systname)\n\n # get channels for system\n chans = base_channels(client, session, sys_id)\n \n \n if state == 'present':\n if channelname in chans:\n module.exit_json(changed=False, msg=\"Channel %s already exists\" % channelname)\n else:\n subscribe_channels(channelname, client, session, systname, sys_id)\n module.exit_json(changed=True, msg=\"Channel %s added\" % channelname)\n\n if state == 'absent':\n if not channelname in chans:\n module.exit_json(changed=False, msg=\"Not subscribed to channel %s.\" % channelname)\n else:\n unsubscribe_channels(channelname, client, session, systname, sys_id)\n module.exit_json(changed=True, msg=\"Channel %s removed\" % channelname)\n\n client.auth.logout(session)\n\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nmain()\n\n", "path": "packaging/os/rhn_channel.py"}]} | 2,034 | 315 |
gh_patches_debug_4966 | rasdani/github-patches | git_diff | getpelican__pelican-1219 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
CATEGORIES_URL doesn't honor its default value
First of all, thanks for this great software. But when I use the variable CATEGORIES_URL in templates (category.html), it doesn't have its default value (which the documentation gives as CATEGORIES_URL ('categories/')). I need to set this value explicitly in pelican.conf. I'm using Pelican 3.3.0
Thanks
--- END ISSUE ---
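Until a default ships, the explicit override the reporter mentions is just two lines in the project settings file; the exact values below are an assumption and should match whatever URL scheme the theme expects:
```python
# pelicanconf.py -- explicit workaround; values are assumptions, not the
# documented default, so adjust them to the URLs your theme uses.
CATEGORIES_URL = 'categories.html'
CATEGORIES_SAVE_AS = 'categories.html'
```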
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pelican/settings.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import unicode_literals, print_function
3 import six
4
5 import copy
6 import inspect
7 import os
8 import locale
9 import logging
10
11 try:
12 # SourceFileLoader is the recommended way in 3.3+
13 from importlib.machinery import SourceFileLoader
14 load_source = lambda name, path: SourceFileLoader(name, path).load_module()
15 except ImportError:
16 # but it does not exist in 3.2-, so fall back to imp
17 import imp
18 load_source = imp.load_source
19
20 from os.path import isabs
21
22
23 logger = logging.getLogger(__name__)
24
25
26 DEFAULT_THEME = os.path.join(os.path.dirname(os.path.abspath(__file__)),
27 'themes', 'notmyidea')
28 DEFAULT_CONFIG = {
29 'PATH': os.curdir,
30 'ARTICLE_DIR': '',
31 'ARTICLE_EXCLUDES': ('pages',),
32 'PAGE_DIR': 'pages',
33 'PAGE_EXCLUDES': (),
34 'THEME': DEFAULT_THEME,
35 'OUTPUT_PATH': 'output',
36 'READERS': {},
37 'STATIC_PATHS': ['images', ],
38 'THEME_STATIC_DIR': 'theme',
39 'THEME_STATIC_PATHS': ['static', ],
40 'FEED_ALL_ATOM': os.path.join('feeds', 'all.atom.xml'),
41 'CATEGORY_FEED_ATOM': os.path.join('feeds', '%s.atom.xml'),
42 'TRANSLATION_FEED_ATOM': os.path.join('feeds', 'all-%s.atom.xml'),
43 'FEED_MAX_ITEMS': '',
44 'SITEURL': '',
45 'SITENAME': 'A Pelican Blog',
46 'DISPLAY_PAGES_ON_MENU': True,
47 'DISPLAY_CATEGORIES_ON_MENU': True,
48 'OUTPUT_SOURCES': False,
49 'OUTPUT_SOURCES_EXTENSION': '.text',
50 'USE_FOLDER_AS_CATEGORY': True,
51 'DEFAULT_CATEGORY': 'misc',
52 'WITH_FUTURE_DATES': True,
53 'CSS_FILE': 'main.css',
54 'NEWEST_FIRST_ARCHIVES': True,
55 'REVERSE_CATEGORY_ORDER': False,
56 'DELETE_OUTPUT_DIRECTORY': False,
57 'OUTPUT_RETENTION': (),
58 'ARTICLE_URL': '{slug}.html',
59 'ARTICLE_SAVE_AS': '{slug}.html',
60 'ARTICLE_LANG_URL': '{slug}-{lang}.html',
61 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html',
62 'PAGE_URL': 'pages/{slug}.html',
63 'PAGE_SAVE_AS': os.path.join('pages', '{slug}.html'),
64 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html',
65 'PAGE_LANG_SAVE_AS': os.path.join('pages', '{slug}-{lang}.html'),
66 'STATIC_URL': '{path}',
67 'STATIC_SAVE_AS': '{path}',
68 'PDF_GENERATOR': False,
69 'PDF_STYLE_PATH': '',
70 'PDF_STYLE': 'twelvepoint',
71 'CATEGORY_URL': 'category/{slug}.html',
72 'CATEGORY_SAVE_AS': os.path.join('category', '{slug}.html'),
73 'TAG_URL': 'tag/{slug}.html',
74 'TAG_SAVE_AS': os.path.join('tag', '{slug}.html'),
75 'AUTHOR_URL': 'author/{slug}.html',
76 'AUTHOR_SAVE_AS': os.path.join('author', '{slug}.html'),
77 'PAGINATION_PATTERNS': [
78 (0, '{name}{number}.html', '{name}{number}.html'),
79 ],
80 'YEAR_ARCHIVE_SAVE_AS': False,
81 'MONTH_ARCHIVE_SAVE_AS': False,
82 'DAY_ARCHIVE_SAVE_AS': False,
83 'RELATIVE_URLS': False,
84 'DEFAULT_LANG': 'en',
85 'TAG_CLOUD_STEPS': 4,
86 'TAG_CLOUD_MAX_ITEMS': 100,
87 'DIRECT_TEMPLATES': ('index', 'tags', 'categories', 'authors', 'archives'),
88 'EXTRA_TEMPLATES_PATHS': [],
89 'PAGINATED_DIRECT_TEMPLATES': ('index', ),
90 'PELICAN_CLASS': 'pelican.Pelican',
91 'DEFAULT_DATE_FORMAT': '%a %d %B %Y',
92 'DATE_FORMATS': {},
93 'ASCIIDOC_OPTIONS': [],
94 'MD_EXTENSIONS': ['codehilite(css_class=highlight)', 'extra'],
95 'JINJA_EXTENSIONS': [],
96 'JINJA_FILTERS': {},
97 'LOCALE': [''], # defaults to user locale
98 'DEFAULT_PAGINATION': False,
99 'DEFAULT_ORPHANS': 0,
100 'DEFAULT_METADATA': (),
101 'FILENAME_METADATA': '(?P<date>\d{4}-\d{2}-\d{2}).*',
102 'PATH_METADATA': '',
103 'EXTRA_PATH_METADATA': {},
104 'DEFAULT_STATUS': 'published',
105 'ARTICLE_PERMALINK_STRUCTURE': '',
106 'TYPOGRIFY': False,
107 'SUMMARY_MAX_LENGTH': 50,
108 'PLUGIN_PATH': '',
109 'PLUGINS': [],
110 'PYGMENTS_RST_OPTIONS': {},
111 'TEMPLATE_PAGES': {},
112 'IGNORE_FILES': ['.#*'],
113 'SLUG_SUBSTITUTIONS': (),
114 'INTRASITE_LINK_REGEX': '[{|](?P<what>.*?)[|}]',
115 }
116
117 PYGMENTS_RST_OPTIONS = None
118
119
120 def read_settings(path=None, override=None):
121 if path:
122 local_settings = get_settings_from_file(path)
123 # Make the paths relative to the settings file
124 for p in ['PATH', 'OUTPUT_PATH', 'THEME', 'PLUGIN_PATH']:
125 if p in local_settings and local_settings[p] is not None \
126 and not isabs(local_settings[p]):
127 absp = os.path.abspath(os.path.normpath(os.path.join(
128 os.path.dirname(path), local_settings[p])))
129 if p not in ('THEME', 'PLUGIN_PATH') or os.path.exists(absp):
130 local_settings[p] = absp
131 else:
132 local_settings = copy.deepcopy(DEFAULT_CONFIG)
133
134 if override:
135 local_settings.update(override)
136
137 parsed_settings = configure_settings(local_settings)
138 # This is because there doesn't seem to be a way to pass extra
139 # parameters to docutils directive handlers, so we have to have a
140 # variable here that we'll import from within Pygments.run (see
141 # rstdirectives.py) to see what the user defaults were.
142 global PYGMENTS_RST_OPTIONS
143 PYGMENTS_RST_OPTIONS = parsed_settings.get('PYGMENTS_RST_OPTIONS', None)
144 return parsed_settings
145
146
147 def get_settings_from_module(module=None, default_settings=DEFAULT_CONFIG):
148 """Loads settings from a module, returns a dictionary."""
149
150 context = copy.deepcopy(default_settings)
151 if module is not None:
152 context.update(
153 (k, v) for k, v in inspect.getmembers(module) if k.isupper())
154 return context
155
156
157 def get_settings_from_file(path, default_settings=DEFAULT_CONFIG):
158 """Loads settings from a file path, returning a dict."""
159
160 name, ext = os.path.splitext(os.path.basename(path))
161 module = load_source(name, path)
162 return get_settings_from_module(module, default_settings=default_settings)
163
164
165 def configure_settings(settings):
166 """Provide optimizations, error checking and warnings for the given
167 settings.
168
169 """
170 if not 'PATH' in settings or not os.path.isdir(settings['PATH']):
171 raise Exception('You need to specify a path containing the content'
172 ' (see pelican --help for more information)')
173
174 # lookup the theme in "pelican/themes" if the given one doesn't exist
175 if not os.path.isdir(settings['THEME']):
176 theme_path = os.path.join(
177 os.path.dirname(os.path.abspath(__file__)),
178 'themes',
179 settings['THEME'])
180 if os.path.exists(theme_path):
181 settings['THEME'] = theme_path
182 else:
183 raise Exception("Could not find the theme %s"
184 % settings['THEME'])
185
186 # standardize strings to lowercase strings
187 for key in [
188 'DEFAULT_LANG',
189 ]:
190 if key in settings:
191 settings[key] = settings[key].lower()
192
193 # standardize strings to lists
194 for key in [
195 'LOCALE',
196 ]:
197 if key in settings and isinstance(settings[key], six.string_types):
198 settings[key] = [settings[key]]
199
200 # check settings that must be a particular type
201 for key, types in [
202 ('OUTPUT_SOURCES_EXTENSION', six.string_types),
203 ('FILENAME_METADATA', six.string_types),
204 ]:
205 if key in settings and not isinstance(settings[key], types):
206 value = settings.pop(key)
207 logger.warn(
208 'Detected misconfigured {} ({}), '
209 'falling back to the default ({})'.format(
210 key, value, DEFAULT_CONFIG[key]))
211
212 # try to set the different locales, fallback on the default.
213 locales = settings.get('LOCALE', DEFAULT_CONFIG['LOCALE'])
214
215 for locale_ in locales:
216 try:
217 locale.setlocale(locale.LC_ALL, str(locale_))
218 break # break if it is successful
219 except locale.Error:
220 pass
221 else:
222 logger.warning("LOCALE option doesn't contain a correct value")
223
224 if ('SITEURL' in settings):
225 # If SITEURL has a trailing slash, remove it and provide a warning
226 siteurl = settings['SITEURL']
227 if (siteurl.endswith('/')):
228 settings['SITEURL'] = siteurl[:-1]
229 logger.warning("Removed extraneous trailing slash from SITEURL.")
230 # If SITEURL is defined but FEED_DOMAIN isn't,
231 # set FEED_DOMAIN to SITEURL
232 if not 'FEED_DOMAIN' in settings:
233 settings['FEED_DOMAIN'] = settings['SITEURL']
234
235 # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined
236 feed_keys = [
237 'FEED_ATOM', 'FEED_RSS',
238 'FEED_ALL_ATOM', 'FEED_ALL_RSS',
239 'CATEGORY_FEED_ATOM', 'CATEGORY_FEED_RSS',
240 'TAG_FEED_ATOM', 'TAG_FEED_RSS',
241 'TRANSLATION_FEED_ATOM', 'TRANSLATION_FEED_RSS',
242 ]
243
244 if any(settings.get(k) for k in feed_keys):
245 if not settings.get('SITEURL'):
246 logger.warning('Feeds generated without SITEURL set properly may'
247 ' not be valid')
248
249 if not 'TIMEZONE' in settings:
250 logger.warning(
251 'No timezone information specified in the settings. Assuming'
252 ' your timezone is UTC for feed generation. Check '
253 'http://docs.getpelican.com/en/latest/settings.html#timezone '
254 'for more information')
255
256 # fix up pagination rules
257 from pelican.paginator import PaginationRule
258 pagination_rules = [
259 PaginationRule(*r) for r in settings.get(
260 'PAGINATION_PATTERNS',
261 DEFAULT_CONFIG['PAGINATION_PATTERNS'],
262 )
263 ]
264 settings['PAGINATION_PATTERNS'] = sorted(
265 pagination_rules,
266 key=lambda r: r[0],
267 )
268
269 # Save people from accidentally setting a string rather than a list
270 path_keys = (
271 'ARTICLE_EXCLUDES',
272 'DEFAULT_METADATA',
273 'DIRECT_TEMPLATES',
274 'EXTRA_TEMPLATES_PATHS',
275 'FILES_TO_COPY',
276 'IGNORE_FILES',
277 'JINJA_EXTENSIONS',
278 'PAGINATED_DIRECT_TEMPLATES',
279 'PLUGINS',
280 'STATIC_PATHS',
281 'THEME_STATIC_PATHS',
282 )
283 for PATH_KEY in filter(lambda k: k in settings, path_keys):
284 if isinstance(settings[PATH_KEY], six.string_types):
285 logger.warning("Detected misconfiguration with %s setting "
286 "(must be a list), falling back to the default"
287 % PATH_KEY)
288 settings[PATH_KEY] = DEFAULT_CONFIG[PATH_KEY]
289
290 for old, new, doc in [
291 ('LESS_GENERATOR', 'the Webassets plugin', None),
292 ('FILES_TO_COPY', 'STATIC_PATHS and EXTRA_PATH_METADATA',
293 'https://github.com/getpelican/pelican/blob/master/docs/settings.rst#path-metadata'),
294 ]:
295 if old in settings:
296 message = 'The {} setting has been removed in favor of {}'.format(
297 old, new)
298 if doc:
299 message += ', see {} for details'.format(doc)
300 logger.warning(message)
301
302 return settings
303
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pelican/settings.py b/pelican/settings.py
--- a/pelican/settings.py
+++ b/pelican/settings.py
@@ -68,6 +68,8 @@
'PDF_GENERATOR': False,
'PDF_STYLE_PATH': '',
'PDF_STYLE': 'twelvepoint',
+ 'CATEGORIES_URL': 'categories.html',
+ 'CATEGORIES_SAVE_AS': 'categories.html',
'CATEGORY_URL': 'category/{slug}.html',
'CATEGORY_SAVE_AS': os.path.join('category', '{slug}.html'),
'TAG_URL': 'tag/{slug}.html',
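With the two keys added to `DEFAULT_CONFIG`, a quick sanity check (assuming a Pelican checkout with this patch applied) no longer needs any per-project override:
```python
from pelican.settings import DEFAULT_CONFIG

# Both keys now resolve straight from the shipped defaults.
print(DEFAULT_CONFIG["CATEGORIES_URL"])      # 'categories.html'
print(DEFAULT_CONFIG["CATEGORIES_SAVE_AS"])  # 'categories.html'
```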
| {"golden_diff": "diff --git a/pelican/settings.py b/pelican/settings.py\n--- a/pelican/settings.py\n+++ b/pelican/settings.py\n@@ -68,6 +68,8 @@\n 'PDF_GENERATOR': False,\n 'PDF_STYLE_PATH': '',\n 'PDF_STYLE': 'twelvepoint',\n+ 'CATEGORIES_URL': 'categories.html',\n+ 'CATEGORIES_SAVE_AS': 'categories.html',\n 'CATEGORY_URL': 'category/{slug}.html',\n 'CATEGORY_SAVE_AS': os.path.join('category', '{slug}.html'),\n 'TAG_URL': 'tag/{slug}.html',\n", "issue": "CATEGORIES_URL doesn't honor it's default value\nFirst of all, thanks for this great software. But, when I'm using the variable CATEGORIES_URL in templates (category.html), it doesn't have it's default value (as said on the documentation: CATEGORIES_URL ('categories/')). I need to explicitly set this value in pelican.conf. I'm using Pelican 3.3.0\n\nThanks\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\nimport six\n\nimport copy\nimport inspect\nimport os\nimport locale\nimport logging\n\ntry:\n # SourceFileLoader is the recommended way in 3.3+\n from importlib.machinery import SourceFileLoader\n load_source = lambda name, path: SourceFileLoader(name, path).load_module()\nexcept ImportError:\n # but it does not exist in 3.2-, so fall back to imp\n import imp\n load_source = imp.load_source\n\nfrom os.path import isabs\n\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_THEME = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'themes', 'notmyidea')\nDEFAULT_CONFIG = {\n 'PATH': os.curdir,\n 'ARTICLE_DIR': '',\n 'ARTICLE_EXCLUDES': ('pages',),\n 'PAGE_DIR': 'pages',\n 'PAGE_EXCLUDES': (),\n 'THEME': DEFAULT_THEME,\n 'OUTPUT_PATH': 'output',\n 'READERS': {},\n 'STATIC_PATHS': ['images', ],\n 'THEME_STATIC_DIR': 'theme',\n 'THEME_STATIC_PATHS': ['static', ],\n 'FEED_ALL_ATOM': os.path.join('feeds', 'all.atom.xml'),\n 'CATEGORY_FEED_ATOM': os.path.join('feeds', '%s.atom.xml'),\n 'TRANSLATION_FEED_ATOM': os.path.join('feeds', 'all-%s.atom.xml'),\n 'FEED_MAX_ITEMS': '',\n 'SITEURL': '',\n 'SITENAME': 'A Pelican Blog',\n 'DISPLAY_PAGES_ON_MENU': True,\n 'DISPLAY_CATEGORIES_ON_MENU': True,\n 'OUTPUT_SOURCES': False,\n 'OUTPUT_SOURCES_EXTENSION': '.text',\n 'USE_FOLDER_AS_CATEGORY': True,\n 'DEFAULT_CATEGORY': 'misc',\n 'WITH_FUTURE_DATES': True,\n 'CSS_FILE': 'main.css',\n 'NEWEST_FIRST_ARCHIVES': True,\n 'REVERSE_CATEGORY_ORDER': False,\n 'DELETE_OUTPUT_DIRECTORY': False,\n 'OUTPUT_RETENTION': (),\n 'ARTICLE_URL': '{slug}.html',\n 'ARTICLE_SAVE_AS': '{slug}.html',\n 'ARTICLE_LANG_URL': '{slug}-{lang}.html',\n 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html',\n 'PAGE_URL': 'pages/{slug}.html',\n 'PAGE_SAVE_AS': os.path.join('pages', '{slug}.html'),\n 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html',\n 'PAGE_LANG_SAVE_AS': os.path.join('pages', '{slug}-{lang}.html'),\n 'STATIC_URL': '{path}',\n 'STATIC_SAVE_AS': '{path}',\n 'PDF_GENERATOR': False,\n 'PDF_STYLE_PATH': '',\n 'PDF_STYLE': 'twelvepoint',\n 'CATEGORY_URL': 'category/{slug}.html',\n 'CATEGORY_SAVE_AS': os.path.join('category', '{slug}.html'),\n 'TAG_URL': 'tag/{slug}.html',\n 'TAG_SAVE_AS': os.path.join('tag', '{slug}.html'),\n 'AUTHOR_URL': 'author/{slug}.html',\n 'AUTHOR_SAVE_AS': os.path.join('author', '{slug}.html'),\n 'PAGINATION_PATTERNS': [\n (0, '{name}{number}.html', '{name}{number}.html'),\n ],\n 'YEAR_ARCHIVE_SAVE_AS': False,\n 'MONTH_ARCHIVE_SAVE_AS': False,\n 'DAY_ARCHIVE_SAVE_AS': False,\n 'RELATIVE_URLS': False,\n 'DEFAULT_LANG': 'en',\n 'TAG_CLOUD_STEPS': 4,\n 'TAG_CLOUD_MAX_ITEMS': 100,\n 
'DIRECT_TEMPLATES': ('index', 'tags', 'categories', 'authors', 'archives'),\n 'EXTRA_TEMPLATES_PATHS': [],\n 'PAGINATED_DIRECT_TEMPLATES': ('index', ),\n 'PELICAN_CLASS': 'pelican.Pelican',\n 'DEFAULT_DATE_FORMAT': '%a %d %B %Y',\n 'DATE_FORMATS': {},\n 'ASCIIDOC_OPTIONS': [],\n 'MD_EXTENSIONS': ['codehilite(css_class=highlight)', 'extra'],\n 'JINJA_EXTENSIONS': [],\n 'JINJA_FILTERS': {},\n 'LOCALE': [''], # defaults to user locale\n 'DEFAULT_PAGINATION': False,\n 'DEFAULT_ORPHANS': 0,\n 'DEFAULT_METADATA': (),\n 'FILENAME_METADATA': '(?P<date>\\d{4}-\\d{2}-\\d{2}).*',\n 'PATH_METADATA': '',\n 'EXTRA_PATH_METADATA': {},\n 'DEFAULT_STATUS': 'published',\n 'ARTICLE_PERMALINK_STRUCTURE': '',\n 'TYPOGRIFY': False,\n 'SUMMARY_MAX_LENGTH': 50,\n 'PLUGIN_PATH': '',\n 'PLUGINS': [],\n 'PYGMENTS_RST_OPTIONS': {},\n 'TEMPLATE_PAGES': {},\n 'IGNORE_FILES': ['.#*'],\n 'SLUG_SUBSTITUTIONS': (),\n 'INTRASITE_LINK_REGEX': '[{|](?P<what>.*?)[|}]',\n }\n\nPYGMENTS_RST_OPTIONS = None\n\n\ndef read_settings(path=None, override=None):\n if path:\n local_settings = get_settings_from_file(path)\n # Make the paths relative to the settings file\n for p in ['PATH', 'OUTPUT_PATH', 'THEME', 'PLUGIN_PATH']:\n if p in local_settings and local_settings[p] is not None \\\n and not isabs(local_settings[p]):\n absp = os.path.abspath(os.path.normpath(os.path.join(\n os.path.dirname(path), local_settings[p])))\n if p not in ('THEME', 'PLUGIN_PATH') or os.path.exists(absp):\n local_settings[p] = absp\n else:\n local_settings = copy.deepcopy(DEFAULT_CONFIG)\n\n if override:\n local_settings.update(override)\n\n parsed_settings = configure_settings(local_settings)\n # This is because there doesn't seem to be a way to pass extra\n # parameters to docutils directive handlers, so we have to have a\n # variable here that we'll import from within Pygments.run (see\n # rstdirectives.py) to see what the user defaults were.\n global PYGMENTS_RST_OPTIONS\n PYGMENTS_RST_OPTIONS = parsed_settings.get('PYGMENTS_RST_OPTIONS', None)\n return parsed_settings\n\n\ndef get_settings_from_module(module=None, default_settings=DEFAULT_CONFIG):\n \"\"\"Loads settings from a module, returns a dictionary.\"\"\"\n\n context = copy.deepcopy(default_settings)\n if module is not None:\n context.update(\n (k, v) for k, v in inspect.getmembers(module) if k.isupper())\n return context\n\n\ndef get_settings_from_file(path, default_settings=DEFAULT_CONFIG):\n \"\"\"Loads settings from a file path, returning a dict.\"\"\"\n\n name, ext = os.path.splitext(os.path.basename(path))\n module = load_source(name, path)\n return get_settings_from_module(module, default_settings=default_settings)\n\n\ndef configure_settings(settings):\n \"\"\"Provide optimizations, error checking and warnings for the given\n settings.\n\n \"\"\"\n if not 'PATH' in settings or not os.path.isdir(settings['PATH']):\n raise Exception('You need to specify a path containing the content'\n ' (see pelican --help for more information)')\n\n # lookup the theme in \"pelican/themes\" if the given one doesn't exist\n if not os.path.isdir(settings['THEME']):\n theme_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n 'themes',\n settings['THEME'])\n if os.path.exists(theme_path):\n settings['THEME'] = theme_path\n else:\n raise Exception(\"Could not find the theme %s\"\n % settings['THEME'])\n\n # standardize strings to lowercase strings\n for key in [\n 'DEFAULT_LANG',\n ]:\n if key in settings:\n settings[key] = settings[key].lower()\n\n # standardize strings to lists\n for key in 
[\n 'LOCALE',\n ]:\n if key in settings and isinstance(settings[key], six.string_types):\n settings[key] = [settings[key]]\n\n # check settings that must be a particular type\n for key, types in [\n ('OUTPUT_SOURCES_EXTENSION', six.string_types),\n ('FILENAME_METADATA', six.string_types),\n ]:\n if key in settings and not isinstance(settings[key], types):\n value = settings.pop(key)\n logger.warn(\n 'Detected misconfigured {} ({}), '\n 'falling back to the default ({})'.format(\n key, value, DEFAULT_CONFIG[key]))\n\n # try to set the different locales, fallback on the default.\n locales = settings.get('LOCALE', DEFAULT_CONFIG['LOCALE'])\n\n for locale_ in locales:\n try:\n locale.setlocale(locale.LC_ALL, str(locale_))\n break # break if it is successful\n except locale.Error:\n pass\n else:\n logger.warning(\"LOCALE option doesn't contain a correct value\")\n\n if ('SITEURL' in settings):\n # If SITEURL has a trailing slash, remove it and provide a warning\n siteurl = settings['SITEURL']\n if (siteurl.endswith('/')):\n settings['SITEURL'] = siteurl[:-1]\n logger.warning(\"Removed extraneous trailing slash from SITEURL.\")\n # If SITEURL is defined but FEED_DOMAIN isn't,\n # set FEED_DOMAIN to SITEURL\n if not 'FEED_DOMAIN' in settings:\n settings['FEED_DOMAIN'] = settings['SITEURL']\n\n # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined\n feed_keys = [\n 'FEED_ATOM', 'FEED_RSS',\n 'FEED_ALL_ATOM', 'FEED_ALL_RSS',\n 'CATEGORY_FEED_ATOM', 'CATEGORY_FEED_RSS',\n 'TAG_FEED_ATOM', 'TAG_FEED_RSS',\n 'TRANSLATION_FEED_ATOM', 'TRANSLATION_FEED_RSS',\n ]\n\n if any(settings.get(k) for k in feed_keys):\n if not settings.get('SITEURL'):\n logger.warning('Feeds generated without SITEURL set properly may'\n ' not be valid')\n\n if not 'TIMEZONE' in settings:\n logger.warning(\n 'No timezone information specified in the settings. Assuming'\n ' your timezone is UTC for feed generation. 
Check '\n 'http://docs.getpelican.com/en/latest/settings.html#timezone '\n 'for more information')\n\n # fix up pagination rules\n from pelican.paginator import PaginationRule\n pagination_rules = [\n PaginationRule(*r) for r in settings.get(\n 'PAGINATION_PATTERNS',\n DEFAULT_CONFIG['PAGINATION_PATTERNS'],\n )\n ]\n settings['PAGINATION_PATTERNS'] = sorted(\n pagination_rules,\n key=lambda r: r[0],\n )\n\n # Save people from accidentally setting a string rather than a list\n path_keys = (\n 'ARTICLE_EXCLUDES',\n 'DEFAULT_METADATA',\n 'DIRECT_TEMPLATES',\n 'EXTRA_TEMPLATES_PATHS',\n 'FILES_TO_COPY',\n 'IGNORE_FILES',\n 'JINJA_EXTENSIONS',\n 'PAGINATED_DIRECT_TEMPLATES',\n 'PLUGINS',\n 'STATIC_PATHS',\n 'THEME_STATIC_PATHS',\n )\n for PATH_KEY in filter(lambda k: k in settings, path_keys):\n if isinstance(settings[PATH_KEY], six.string_types):\n logger.warning(\"Detected misconfiguration with %s setting \"\n \"(must be a list), falling back to the default\"\n % PATH_KEY)\n settings[PATH_KEY] = DEFAULT_CONFIG[PATH_KEY]\n\n for old, new, doc in [\n ('LESS_GENERATOR', 'the Webassets plugin', None),\n ('FILES_TO_COPY', 'STATIC_PATHS and EXTRA_PATH_METADATA',\n 'https://github.com/getpelican/pelican/blob/master/docs/settings.rst#path-metadata'),\n ]:\n if old in settings:\n message = 'The {} setting has been removed in favor of {}'.format(\n old, new)\n if doc:\n message += ', see {} for details'.format(doc)\n logger.warning(message)\n\n return settings\n", "path": "pelican/settings.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals, print_function\nimport six\n\nimport copy\nimport inspect\nimport os\nimport locale\nimport logging\n\ntry:\n # SourceFileLoader is the recommended way in 3.3+\n from importlib.machinery import SourceFileLoader\n load_source = lambda name, path: SourceFileLoader(name, path).load_module()\nexcept ImportError:\n # but it does not exist in 3.2-, so fall back to imp\n import imp\n load_source = imp.load_source\n\nfrom os.path import isabs\n\n\nlogger = logging.getLogger(__name__)\n\n\nDEFAULT_THEME = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'themes', 'notmyidea')\nDEFAULT_CONFIG = {\n 'PATH': os.curdir,\n 'ARTICLE_DIR': '',\n 'ARTICLE_EXCLUDES': ('pages',),\n 'PAGE_DIR': 'pages',\n 'PAGE_EXCLUDES': (),\n 'THEME': DEFAULT_THEME,\n 'OUTPUT_PATH': 'output',\n 'READERS': {},\n 'STATIC_PATHS': ['images', ],\n 'THEME_STATIC_DIR': 'theme',\n 'THEME_STATIC_PATHS': ['static', ],\n 'FEED_ALL_ATOM': os.path.join('feeds', 'all.atom.xml'),\n 'CATEGORY_FEED_ATOM': os.path.join('feeds', '%s.atom.xml'),\n 'TRANSLATION_FEED_ATOM': os.path.join('feeds', 'all-%s.atom.xml'),\n 'FEED_MAX_ITEMS': '',\n 'SITEURL': '',\n 'SITENAME': 'A Pelican Blog',\n 'DISPLAY_PAGES_ON_MENU': True,\n 'DISPLAY_CATEGORIES_ON_MENU': True,\n 'OUTPUT_SOURCES': False,\n 'OUTPUT_SOURCES_EXTENSION': '.text',\n 'USE_FOLDER_AS_CATEGORY': True,\n 'DEFAULT_CATEGORY': 'misc',\n 'WITH_FUTURE_DATES': True,\n 'CSS_FILE': 'main.css',\n 'NEWEST_FIRST_ARCHIVES': True,\n 'REVERSE_CATEGORY_ORDER': False,\n 'DELETE_OUTPUT_DIRECTORY': False,\n 'OUTPUT_RETENTION': (),\n 'ARTICLE_URL': '{slug}.html',\n 'ARTICLE_SAVE_AS': '{slug}.html',\n 'ARTICLE_LANG_URL': '{slug}-{lang}.html',\n 'ARTICLE_LANG_SAVE_AS': '{slug}-{lang}.html',\n 'PAGE_URL': 'pages/{slug}.html',\n 'PAGE_SAVE_AS': os.path.join('pages', '{slug}.html'),\n 'PAGE_LANG_URL': 'pages/{slug}-{lang}.html',\n 'PAGE_LANG_SAVE_AS': os.path.join('pages', '{slug}-{lang}.html'),\n 'STATIC_URL': '{path}',\n 
'STATIC_SAVE_AS': '{path}',\n 'PDF_GENERATOR': False,\n 'PDF_STYLE_PATH': '',\n 'PDF_STYLE': 'twelvepoint',\n 'CATEGORIES_URL': 'categories.html',\n 'CATEGORIES_SAVE_AS': 'categories.html',\n 'CATEGORY_URL': 'category/{slug}.html',\n 'CATEGORY_SAVE_AS': os.path.join('category', '{slug}.html'),\n 'TAG_URL': 'tag/{slug}.html',\n 'TAG_SAVE_AS': os.path.join('tag', '{slug}.html'),\n 'AUTHOR_URL': 'author/{slug}.html',\n 'AUTHOR_SAVE_AS': os.path.join('author', '{slug}.html'),\n 'PAGINATION_PATTERNS': [\n (0, '{name}{number}.html', '{name}{number}.html'),\n ],\n 'YEAR_ARCHIVE_SAVE_AS': False,\n 'MONTH_ARCHIVE_SAVE_AS': False,\n 'DAY_ARCHIVE_SAVE_AS': False,\n 'RELATIVE_URLS': False,\n 'DEFAULT_LANG': 'en',\n 'TAG_CLOUD_STEPS': 4,\n 'TAG_CLOUD_MAX_ITEMS': 100,\n 'DIRECT_TEMPLATES': ('index', 'tags', 'categories', 'authors', 'archives'),\n 'EXTRA_TEMPLATES_PATHS': [],\n 'PAGINATED_DIRECT_TEMPLATES': ('index', ),\n 'PELICAN_CLASS': 'pelican.Pelican',\n 'DEFAULT_DATE_FORMAT': '%a %d %B %Y',\n 'DATE_FORMATS': {},\n 'ASCIIDOC_OPTIONS': [],\n 'MD_EXTENSIONS': ['codehilite(css_class=highlight)', 'extra'],\n 'JINJA_EXTENSIONS': [],\n 'JINJA_FILTERS': {},\n 'LOCALE': [''], # defaults to user locale\n 'DEFAULT_PAGINATION': False,\n 'DEFAULT_ORPHANS': 0,\n 'DEFAULT_METADATA': (),\n 'FILENAME_METADATA': '(?P<date>\\d{4}-\\d{2}-\\d{2}).*',\n 'PATH_METADATA': '',\n 'EXTRA_PATH_METADATA': {},\n 'DEFAULT_STATUS': 'published',\n 'ARTICLE_PERMALINK_STRUCTURE': '',\n 'TYPOGRIFY': False,\n 'SUMMARY_MAX_LENGTH': 50,\n 'PLUGIN_PATH': '',\n 'PLUGINS': [],\n 'PYGMENTS_RST_OPTIONS': {},\n 'TEMPLATE_PAGES': {},\n 'IGNORE_FILES': ['.#*'],\n 'SLUG_SUBSTITUTIONS': (),\n 'INTRASITE_LINK_REGEX': '[{|](?P<what>.*?)[|}]',\n }\n\nPYGMENTS_RST_OPTIONS = None\n\n\ndef read_settings(path=None, override=None):\n if path:\n local_settings = get_settings_from_file(path)\n # Make the paths relative to the settings file\n for p in ['PATH', 'OUTPUT_PATH', 'THEME', 'PLUGIN_PATH']:\n if p in local_settings and local_settings[p] is not None \\\n and not isabs(local_settings[p]):\n absp = os.path.abspath(os.path.normpath(os.path.join(\n os.path.dirname(path), local_settings[p])))\n if p not in ('THEME', 'PLUGIN_PATH') or os.path.exists(absp):\n local_settings[p] = absp\n else:\n local_settings = copy.deepcopy(DEFAULT_CONFIG)\n\n if override:\n local_settings.update(override)\n\n parsed_settings = configure_settings(local_settings)\n # This is because there doesn't seem to be a way to pass extra\n # parameters to docutils directive handlers, so we have to have a\n # variable here that we'll import from within Pygments.run (see\n # rstdirectives.py) to see what the user defaults were.\n global PYGMENTS_RST_OPTIONS\n PYGMENTS_RST_OPTIONS = parsed_settings.get('PYGMENTS_RST_OPTIONS', None)\n return parsed_settings\n\n\ndef get_settings_from_module(module=None, default_settings=DEFAULT_CONFIG):\n \"\"\"Loads settings from a module, returns a dictionary.\"\"\"\n\n context = copy.deepcopy(default_settings)\n if module is not None:\n context.update(\n (k, v) for k, v in inspect.getmembers(module) if k.isupper())\n return context\n\n\ndef get_settings_from_file(path, default_settings=DEFAULT_CONFIG):\n \"\"\"Loads settings from a file path, returning a dict.\"\"\"\n\n name, ext = os.path.splitext(os.path.basename(path))\n module = load_source(name, path)\n return get_settings_from_module(module, default_settings=default_settings)\n\n\ndef configure_settings(settings):\n \"\"\"Provide optimizations, error checking and warnings for the given\n 
settings.\n\n \"\"\"\n if not 'PATH' in settings or not os.path.isdir(settings['PATH']):\n raise Exception('You need to specify a path containing the content'\n ' (see pelican --help for more information)')\n\n # lookup the theme in \"pelican/themes\" if the given one doesn't exist\n if not os.path.isdir(settings['THEME']):\n theme_path = os.path.join(\n os.path.dirname(os.path.abspath(__file__)),\n 'themes',\n settings['THEME'])\n if os.path.exists(theme_path):\n settings['THEME'] = theme_path\n else:\n raise Exception(\"Could not find the theme %s\"\n % settings['THEME'])\n\n # standardize strings to lowercase strings\n for key in [\n 'DEFAULT_LANG',\n ]:\n if key in settings:\n settings[key] = settings[key].lower()\n\n # standardize strings to lists\n for key in [\n 'LOCALE',\n ]:\n if key in settings and isinstance(settings[key], six.string_types):\n settings[key] = [settings[key]]\n\n # check settings that must be a particular type\n for key, types in [\n ('OUTPUT_SOURCES_EXTENSION', six.string_types),\n ('FILENAME_METADATA', six.string_types),\n ]:\n if key in settings and not isinstance(settings[key], types):\n value = settings.pop(key)\n logger.warn(\n 'Detected misconfigured {} ({}), '\n 'falling back to the default ({})'.format(\n key, value, DEFAULT_CONFIG[key]))\n\n # try to set the different locales, fallback on the default.\n locales = settings.get('LOCALE', DEFAULT_CONFIG['LOCALE'])\n\n for locale_ in locales:\n try:\n locale.setlocale(locale.LC_ALL, str(locale_))\n break # break if it is successful\n except locale.Error:\n pass\n else:\n logger.warning(\"LOCALE option doesn't contain a correct value\")\n\n if ('SITEURL' in settings):\n # If SITEURL has a trailing slash, remove it and provide a warning\n siteurl = settings['SITEURL']\n if (siteurl.endswith('/')):\n settings['SITEURL'] = siteurl[:-1]\n logger.warning(\"Removed extraneous trailing slash from SITEURL.\")\n # If SITEURL is defined but FEED_DOMAIN isn't,\n # set FEED_DOMAIN to SITEURL\n if not 'FEED_DOMAIN' in settings:\n settings['FEED_DOMAIN'] = settings['SITEURL']\n\n # Warn if feeds are generated with both SITEURL & FEED_DOMAIN undefined\n feed_keys = [\n 'FEED_ATOM', 'FEED_RSS',\n 'FEED_ALL_ATOM', 'FEED_ALL_RSS',\n 'CATEGORY_FEED_ATOM', 'CATEGORY_FEED_RSS',\n 'TAG_FEED_ATOM', 'TAG_FEED_RSS',\n 'TRANSLATION_FEED_ATOM', 'TRANSLATION_FEED_RSS',\n ]\n\n if any(settings.get(k) for k in feed_keys):\n if not settings.get('SITEURL'):\n logger.warning('Feeds generated without SITEURL set properly may'\n ' not be valid')\n\n if not 'TIMEZONE' in settings:\n logger.warning(\n 'No timezone information specified in the settings. Assuming'\n ' your timezone is UTC for feed generation. 
Check '\n 'http://docs.getpelican.com/en/latest/settings.html#timezone '\n 'for more information')\n\n # fix up pagination rules\n from pelican.paginator import PaginationRule\n pagination_rules = [\n PaginationRule(*r) for r in settings.get(\n 'PAGINATION_PATTERNS',\n DEFAULT_CONFIG['PAGINATION_PATTERNS'],\n )\n ]\n settings['PAGINATION_PATTERNS'] = sorted(\n pagination_rules,\n key=lambda r: r[0],\n )\n\n # Save people from accidentally setting a string rather than a list\n path_keys = (\n 'ARTICLE_EXCLUDES',\n 'DEFAULT_METADATA',\n 'DIRECT_TEMPLATES',\n 'EXTRA_TEMPLATES_PATHS',\n 'FILES_TO_COPY',\n 'IGNORE_FILES',\n 'JINJA_EXTENSIONS',\n 'PAGINATED_DIRECT_TEMPLATES',\n 'PLUGINS',\n 'STATIC_PATHS',\n 'THEME_STATIC_PATHS',\n )\n for PATH_KEY in filter(lambda k: k in settings, path_keys):\n if isinstance(settings[PATH_KEY], six.string_types):\n logger.warning(\"Detected misconfiguration with %s setting \"\n \"(must be a list), falling back to the default\"\n % PATH_KEY)\n settings[PATH_KEY] = DEFAULT_CONFIG[PATH_KEY]\n\n for old, new, doc in [\n ('LESS_GENERATOR', 'the Webassets plugin', None),\n ('FILES_TO_COPY', 'STATIC_PATHS and EXTRA_PATH_METADATA',\n 'https://github.com/getpelican/pelican/blob/master/docs/settings.rst#path-metadata'),\n ]:\n if old in settings:\n message = 'The {} setting has been removed in favor of {}'.format(\n old, new)\n if doc:\n message += ', see {} for details'.format(doc)\n logger.warning(message)\n\n return settings\n", "path": "pelican/settings.py"}]} | 3,797 | 133 |
gh_patches_debug_3973 | rasdani/github-patches | git_diff | encode__uvicorn-513 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
--reload not working on Windows but --loop asyncio is
Duplicate of https://github.com/encode/uvicorn/issues/477 but per request a new topic to track the issue. Especially useful since `--loop asyncio` does seem to work just fine.
@tomchristie suggests defaulting to asyncio on all platforms. I can make a PR for that. Do you still want to keep the optional uvloop dependency? (It's incompatible with Windows in any case, so perhaps it should be removed from the requirements file at least.)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `uvicorn/loops/auto.py`
Content:
```
1 import sys
2
3
4 def auto_loop_setup():
5 try:
6 import uvloop
7 except ImportError as exc: # pragma: no cover
8 if sys.platform == "win32":
9 from uvicorn.loops.iocp import iocp_setup as loop_setup
10 else:
11 from uvicorn.loops.asyncio import asyncio_setup as loop_setup
12
13 loop_setup()
14 else:
15 from uvicorn.loops.uvloop import uvloop_setup
16
17 uvloop_setup()
18
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/uvicorn/loops/auto.py b/uvicorn/loops/auto.py
--- a/uvicorn/loops/auto.py
+++ b/uvicorn/loops/auto.py
@@ -5,10 +5,7 @@
try:
import uvloop
except ImportError as exc: # pragma: no cover
- if sys.platform == "win32":
- from uvicorn.loops.iocp import iocp_setup as loop_setup
- else:
- from uvicorn.loops.asyncio import asyncio_setup as loop_setup
+ from uvicorn.loops.asyncio import asyncio_setup as loop_setup
loop_setup()
else:
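For reference, a sketch of `uvicorn/loops/auto.py` as it reads once the patch above is applied — the Windows-specific IOCP branch is gone and every platform falls back to plain asyncio when uvloop is unavailable (the now-unused `import sys` is left in place, as in the actual patch):

```python
import sys


def auto_loop_setup():
    try:
        import uvloop  # uvloop has no Windows support, so this import fails there
    except ImportError as exc:  # pragma: no cover
        # Fall back to the standard asyncio event loop on every platform
        # instead of routing Windows to the old IOCP setup.
        from uvicorn.loops.asyncio import asyncio_setup as loop_setup

        loop_setup()
    else:
        from uvicorn.loops.uvloop import uvloop_setup

        uvloop_setup()
```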
| {"golden_diff": "diff --git a/uvicorn/loops/auto.py b/uvicorn/loops/auto.py\n--- a/uvicorn/loops/auto.py\n+++ b/uvicorn/loops/auto.py\n@@ -5,10 +5,7 @@\n try:\n import uvloop\n except ImportError as exc: # pragma: no cover\n- if sys.platform == \"win32\":\n- from uvicorn.loops.iocp import iocp_setup as loop_setup\n- else:\n- from uvicorn.loops.asyncio import asyncio_setup as loop_setup\n+ from uvicorn.loops.asyncio import asyncio_setup as loop_setup\n \n loop_setup()\n else:\n", "issue": "--reload not working on Windows but --loop asyncio is\nDuplicate of https://github.com/encode/uvicorn/issues/477 but per request a new topic to track the issue. Especially useful since `--loop asyncio` does seem to work just fine.\r\n\r\n@tomchristie suggests to default to asyncio on all platforms. I can make a PR for that. Do you still want to keep the optional uvloop dependency? (It's incompatible with Windows in any case, so perhaps it should be removed from the requirements file at least.)\n", "before_files": [{"content": "import sys\n\n\ndef auto_loop_setup():\n try:\n import uvloop\n except ImportError as exc: # pragma: no cover\n if sys.platform == \"win32\":\n from uvicorn.loops.iocp import iocp_setup as loop_setup\n else:\n from uvicorn.loops.asyncio import asyncio_setup as loop_setup\n\n loop_setup()\n else:\n from uvicorn.loops.uvloop import uvloop_setup\n\n uvloop_setup()\n", "path": "uvicorn/loops/auto.py"}], "after_files": [{"content": "import sys\n\n\ndef auto_loop_setup():\n try:\n import uvloop\n except ImportError as exc: # pragma: no cover\n from uvicorn.loops.asyncio import asyncio_setup as loop_setup\n\n loop_setup()\n else:\n from uvicorn.loops.uvloop import uvloop_setup\n\n uvloop_setup()\n", "path": "uvicorn/loops/auto.py"}]} | 502 | 145 |
gh_patches_debug_195 | rasdani/github-patches | git_diff | conda__conda-3524 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Progress bar broken

```
C:\Users\Korijn\dev\myproject>conda info
Current conda install:
platform : win-64
conda version : 4.2.7
conda is private : False
conda-env version : 4.2.7
conda-build version : 2.0.1
python version : 3.5.1.final.0
requests version : 2.9.1
root environment : C:\Users\Korijn\Miniconda3 (writable)
default environment : C:\Users\Korijn\Miniconda3
envs directories : C:\Users\Korijn\Miniconda3\envs
package cache : C:\Users\Korijn\Miniconda3\pkgs
channel URLs : https://repo.continuum.io/pkgs/free/win-64/
https://repo.continuum.io/pkgs/free/noarch/
https://repo.continuum.io/pkgs/pro/win-64/
https://repo.continuum.io/pkgs/pro/noarch/
https://repo.continuum.io/pkgs/msys2/win-64/
https://repo.continuum.io/pkgs/msys2/noarch/
config file : C:\Users\Korijn\.condarc
offline mode : False
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/base/constants.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 This file should hold almost all string literals and magic numbers used throughout the code base.
4 The exception is if a literal is specifically meant to be private to and isolated within a module.
5 """
6 from __future__ import absolute_import, division, print_function
7
8 import os
9 import sys
10 from logging import getLogger
11 from platform import machine
12
13 from enum import Enum
14
15 from conda._vendor.auxlib.collection import frozendict
16
17 log = getLogger(__name__)
18
19
20 class Arch(Enum):
21 x86 = 'x86'
22 x86_64 = 'x86_64'
23 armv6l = 'armv6l'
24 armv7l = 'armv7l'
25 ppc64le = 'ppc64le'
26
27 @classmethod
28 def from_sys(cls):
29 return cls[machine()]
30
31
32 class Platform(Enum):
33 linux = 'linux'
34 win = 'win32'
35 openbsd = 'openbsd5'
36 osx = 'darwin'
37
38 @classmethod
39 def from_sys(cls):
40 p = sys.platform
41 if p.startswith('linux'):
42 # Changed in version 2.7.3: Since lots of code check for sys.platform == 'linux2',
43 # and there is no essential change between Linux 2.x and 3.x, sys.platform is always
44 # set to 'linux2', even on Linux 3.x. In Python 3.3 and later, the value will always
45 # be set to 'linux'
46 p = 'linux'
47 return cls(p)
48
49 machine_bits = 8 * tuple.__itemsize__
50
51 # UID = os.getuid()
52 PWD = os.getcwd()
53 CONDA = 'CONDA'
54 CONDA_ = 'CONDA_'
55 conda = 'conda'
56
57 SEARCH_PATH = (
58 '/etc/conda/condarc',
59 '/etc/conda/condarc.d/',
60 '/var/lib/conda/condarc',
61 '/var/lib/conda/condarc.d/',
62 '$CONDA_ROOT/condarc',
63 '$CONDA_ROOT/.condarc',
64 '$CONDA_ROOT/condarc.d/',
65 '~/.conda/condarc',
66 '~/.conda/condarc.d/',
67 '~/.condarc',
68 '$CONDA_PREFIX/.condarc',
69 '$CONDA_PREFIX/condarc.d/',
70 '$CONDARC',
71 )
72
73 DEFAULT_CHANNEL_ALIAS = 'https://conda.anaconda.org/'
74
75 PLATFORM_DIRECTORIES = ("linux-64", "linux-32",
76 "win-64", "win-32",
77 "osx-64", "noarch")
78
79 RECOGNIZED_URL_SCHEMES = ('http', 'https', 'ftp', 's3', 'file')
80
81
82 if Platform.from_sys() is Platform.win:
83 DEFAULT_CHANNELS = ('https://repo.continuum.io/pkgs/free',
84 'https://repo.continuum.io/pkgs/pro',
85 'https://repo.continuum.io/pkgs/msys2',
86 )
87 else:
88 DEFAULT_CHANNELS = ('https://repo.continuum.io/pkgs/free',
89 'https://repo.continuum.io/pkgs/pro',
90 )
91
92 ROOT_ENV_NAME = 'root'
93
94 EMPTY_LIST = ()
95 EMPTY_MAP = frozendict()
96
97
98 class _Null(object):
99 def __nonzero__(self):
100 return False
101
102 NULL = _Null()
103
104 UTF8 = 'UTF-8'
105
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conda/base/constants.py b/conda/base/constants.py
--- a/conda/base/constants.py
+++ b/conda/base/constants.py
@@ -99,6 +99,9 @@
def __nonzero__(self):
return False
+ def __bool__(self):
+ return False
+
NULL = _Null()
UTF8 = 'UTF-8'
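The one-line summary of this fix: `__nonzero__` is the Python 2 truthiness hook and is ignored by Python 3, so under the reporter's Python 3.5 the sentinel `NULL` fell back to default object truthiness and evaluated as `True`, which is presumably what broke the progress-bar logic. A minimal sketch of the behavior the added `__bool__` restores:

```python
class _Null(object):
    def __nonzero__(self):  # Python 2 truthiness hook; ignored by Python 3
        return False

    def __bool__(self):     # Python 3 truthiness hook added by the patch
        return False


NULL = _Null()

# Without __bool__, Python 3 would report bool(NULL) == True.
# With the patch, both interpreter generations agree:
assert not NULL
```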
| {"golden_diff": "diff --git a/conda/base/constants.py b/conda/base/constants.py\n--- a/conda/base/constants.py\n+++ b/conda/base/constants.py\n@@ -99,6 +99,9 @@\n def __nonzero__(self):\n return False\n \n+ def __bool__(self):\n+ return False\n+\n NULL = _Null()\n \n UTF8 = 'UTF-8'\n", "issue": "Progress bar broken\n\n\n```\nC:\\Users\\Korijn\\dev\\myproject>conda info\nCurrent conda install:\n\n platform : win-64\n conda version : 4.2.7\n conda is private : False\n conda-env version : 4.2.7\n conda-build version : 2.0.1\n python version : 3.5.1.final.0\n requests version : 2.9.1\n root environment : C:\\Users\\Korijn\\Miniconda3 (writable)\n default environment : C:\\Users\\Korijn\\Miniconda3\n envs directories : C:\\Users\\Korijn\\Miniconda3\\envs\n package cache : C:\\Users\\Korijn\\Miniconda3\\pkgs\n channel URLs : https://repo.continuum.io/pkgs/free/win-64/\n https://repo.continuum.io/pkgs/free/noarch/\n https://repo.continuum.io/pkgs/pro/win-64/\n https://repo.continuum.io/pkgs/pro/noarch/\n https://repo.continuum.io/pkgs/msys2/win-64/\n https://repo.continuum.io/pkgs/msys2/noarch/\n config file : C:\\Users\\Korijn\\.condarc\n offline mode : False\n```\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nThis file should hold almost all string literals and magic numbers used throughout the code base.\nThe exception is if a literal is specifically meant to be private to and isolated within a module.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom logging import getLogger\nfrom platform import machine\n\nfrom enum import Enum\n\nfrom conda._vendor.auxlib.collection import frozendict\n\nlog = getLogger(__name__)\n\n\nclass Arch(Enum):\n x86 = 'x86'\n x86_64 = 'x86_64'\n armv6l = 'armv6l'\n armv7l = 'armv7l'\n ppc64le = 'ppc64le'\n\n @classmethod\n def from_sys(cls):\n return cls[machine()]\n\n\nclass Platform(Enum):\n linux = 'linux'\n win = 'win32'\n openbsd = 'openbsd5'\n osx = 'darwin'\n\n @classmethod\n def from_sys(cls):\n p = sys.platform\n if p.startswith('linux'):\n # Changed in version 2.7.3: Since lots of code check for sys.platform == 'linux2',\n # and there is no essential change between Linux 2.x and 3.x, sys.platform is always\n # set to 'linux2', even on Linux 3.x. 
In Python 3.3 and later, the value will always\n # be set to 'linux'\n p = 'linux'\n return cls(p)\n\nmachine_bits = 8 * tuple.__itemsize__\n\n# UID = os.getuid()\nPWD = os.getcwd()\nCONDA = 'CONDA'\nCONDA_ = 'CONDA_'\nconda = 'conda'\n\nSEARCH_PATH = (\n '/etc/conda/condarc',\n '/etc/conda/condarc.d/',\n '/var/lib/conda/condarc',\n '/var/lib/conda/condarc.d/',\n '$CONDA_ROOT/condarc',\n '$CONDA_ROOT/.condarc',\n '$CONDA_ROOT/condarc.d/',\n '~/.conda/condarc',\n '~/.conda/condarc.d/',\n '~/.condarc',\n '$CONDA_PREFIX/.condarc',\n '$CONDA_PREFIX/condarc.d/',\n '$CONDARC',\n)\n\nDEFAULT_CHANNEL_ALIAS = 'https://conda.anaconda.org/'\n\nPLATFORM_DIRECTORIES = (\"linux-64\", \"linux-32\",\n \"win-64\", \"win-32\",\n \"osx-64\", \"noarch\")\n\nRECOGNIZED_URL_SCHEMES = ('http', 'https', 'ftp', 's3', 'file')\n\n\nif Platform.from_sys() is Platform.win:\n DEFAULT_CHANNELS = ('https://repo.continuum.io/pkgs/free',\n 'https://repo.continuum.io/pkgs/pro',\n 'https://repo.continuum.io/pkgs/msys2',\n )\nelse:\n DEFAULT_CHANNELS = ('https://repo.continuum.io/pkgs/free',\n 'https://repo.continuum.io/pkgs/pro',\n )\n\nROOT_ENV_NAME = 'root'\n\nEMPTY_LIST = ()\nEMPTY_MAP = frozendict()\n\n\nclass _Null(object):\n def __nonzero__(self):\n return False\n\nNULL = _Null()\n\nUTF8 = 'UTF-8'\n", "path": "conda/base/constants.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nThis file should hold almost all string literals and magic numbers used throughout the code base.\nThe exception is if a literal is specifically meant to be private to and isolated within a module.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nimport os\nimport sys\nfrom logging import getLogger\nfrom platform import machine\n\nfrom enum import Enum\n\nfrom conda._vendor.auxlib.collection import frozendict\n\nlog = getLogger(__name__)\n\n\nclass Arch(Enum):\n x86 = 'x86'\n x86_64 = 'x86_64'\n armv6l = 'armv6l'\n armv7l = 'armv7l'\n ppc64le = 'ppc64le'\n\n @classmethod\n def from_sys(cls):\n return cls[machine()]\n\n\nclass Platform(Enum):\n linux = 'linux'\n win = 'win32'\n openbsd = 'openbsd5'\n osx = 'darwin'\n\n @classmethod\n def from_sys(cls):\n p = sys.platform\n if p.startswith('linux'):\n # Changed in version 2.7.3: Since lots of code check for sys.platform == 'linux2',\n # and there is no essential change between Linux 2.x and 3.x, sys.platform is always\n # set to 'linux2', even on Linux 3.x. 
In Python 3.3 and later, the value will always\n # be set to 'linux'\n p = 'linux'\n return cls(p)\n\nmachine_bits = 8 * tuple.__itemsize__\n\n# UID = os.getuid()\nPWD = os.getcwd()\nCONDA = 'CONDA'\nCONDA_ = 'CONDA_'\nconda = 'conda'\n\nSEARCH_PATH = (\n '/etc/conda/condarc',\n '/etc/conda/condarc.d/',\n '/var/lib/conda/condarc',\n '/var/lib/conda/condarc.d/',\n '$CONDA_ROOT/condarc',\n '$CONDA_ROOT/.condarc',\n '$CONDA_ROOT/condarc.d/',\n '~/.conda/condarc',\n '~/.conda/condarc.d/',\n '~/.condarc',\n '$CONDA_PREFIX/.condarc',\n '$CONDA_PREFIX/condarc.d/',\n '$CONDARC',\n)\n\nDEFAULT_CHANNEL_ALIAS = 'https://conda.anaconda.org/'\n\nPLATFORM_DIRECTORIES = (\"linux-64\", \"linux-32\",\n \"win-64\", \"win-32\",\n \"osx-64\", \"noarch\")\n\nRECOGNIZED_URL_SCHEMES = ('http', 'https', 'ftp', 's3', 'file')\n\n\nif Platform.from_sys() is Platform.win:\n DEFAULT_CHANNELS = ('https://repo.continuum.io/pkgs/free',\n 'https://repo.continuum.io/pkgs/pro',\n 'https://repo.continuum.io/pkgs/msys2',\n )\nelse:\n DEFAULT_CHANNELS = ('https://repo.continuum.io/pkgs/free',\n 'https://repo.continuum.io/pkgs/pro',\n )\n\nROOT_ENV_NAME = 'root'\n\nEMPTY_LIST = ()\nEMPTY_MAP = frozendict()\n\n\nclass _Null(object):\n def __nonzero__(self):\n return False\n\n def __bool__(self):\n return False\n\nNULL = _Null()\n\nUTF8 = 'UTF-8'\n", "path": "conda/base/constants.py"}]} | 1,586 | 84 |
gh_patches_debug_14579 | rasdani/github-patches | git_diff | boto__boto-1543 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Support additional regions in CloudSearch
It appears that boto currently only supports two regions. It would be nice to support the more recently added ones.
Current list of CloudSearch regions is here: http://aws.amazon.com/cloudsearch/faqs/#Which_AWS_regions_is_CloudSearch_available_in?
I think it just needs to be added to boto/cloudsearch/__init__.py:
return [RegionInfo(name='us-east-1',
endpoint='cloudsearch.us-east-1.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
RegionInfo(name='eu-west-1',
endpoint='cloudsearch.eu-west-1.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
And yes, you can use another region by changing the default field in .boto
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `boto/cloudsearch/__init__.py`
Content:
```
1 # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
2 # Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
3 # All Rights Reserved
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining a
6 # copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish, dis-
9 # tribute, sublicense, and/or sell copies of the Software, and to permit
10 # persons to whom the Software is furnished to do so, subject to the fol-
11 # lowing conditions:
12 #
13 # The above copyright notice and this permission notice shall be included
14 # in all copies or substantial portions of the Software.
15 #
16 # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 # OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
18 # ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
19 # SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
20 # WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22 # IN THE SOFTWARE.
23 #
24 from boto.ec2.regioninfo import RegionInfo
25
26
27 def regions():
28 """
29 Get all available regions for the Amazon CloudSearch service.
30
31 :rtype: list
32 :return: A list of :class:`boto.regioninfo.RegionInfo`
33 """
34 import boto.cloudsearch.layer1
35 return [RegionInfo(name='us-east-1',
36 endpoint='cloudsearch.us-east-1.amazonaws.com',
37 connection_cls=boto.cloudsearch.layer1.Layer1),
38 RegionInfo(name='eu-west-1',
39 endpoint='cloudsearch.eu-west-1.amazonaws.com',
40 connection_cls=boto.cloudsearch.layer1.Layer1),
41 ]
42
43
44 def connect_to_region(region_name, **kw_params):
45 for region in regions():
46 if region.name == region_name:
47 return region.connect(**kw_params)
48 return None
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/boto/cloudsearch/__init__.py b/boto/cloudsearch/__init__.py
--- a/boto/cloudsearch/__init__.py
+++ b/boto/cloudsearch/__init__.py
@@ -38,6 +38,16 @@
RegionInfo(name='eu-west-1',
endpoint='cloudsearch.eu-west-1.amazonaws.com',
connection_cls=boto.cloudsearch.layer1.Layer1),
+ RegionInfo(name='us-west-1',
+ endpoint='cloudsearch.us-west-1.amazonaws.com',
+ connection_cls=boto.cloudsearch.layer1.Layer1),
+ RegionInfo(name='us-west-2',
+ endpoint='cloudsearch.us-west-2.amazonaws.com',
+ connection_cls=boto.cloudsearch.layer1.Layer1),
+ RegionInfo(name='ap-southeast-1',
+ endpoint='cloudsearch.ap-southeast-1.amazonaws.com',
+ connection_cls=boto.cloudsearch.layer1.Layer1),
+
]
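A quick usage sketch of the effect of the added entries — `connect_to_region` simply scans this list, so any region that is not registered here yields `None` (assumes valid AWS credentials are already configured for boto):

```python
import boto.cloudsearch

# Before the patch this lookup found no matching RegionInfo and returned None;
# with 'us-west-2' registered it now returns a Layer1 connection object.
conn = boto.cloudsearch.connect_to_region('us-west-2')
assert conn is not None
```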
| {"golden_diff": "diff --git a/boto/cloudsearch/__init__.py b/boto/cloudsearch/__init__.py\n--- a/boto/cloudsearch/__init__.py\n+++ b/boto/cloudsearch/__init__.py\n@@ -38,6 +38,16 @@\n RegionInfo(name='eu-west-1',\n endpoint='cloudsearch.eu-west-1.amazonaws.com',\n connection_cls=boto.cloudsearch.layer1.Layer1),\n+ RegionInfo(name='us-west-1',\n+ endpoint='cloudsearch.us-west-1.amazonaws.com',\n+ connection_cls=boto.cloudsearch.layer1.Layer1),\n+ RegionInfo(name='us-west-2',\n+ endpoint='cloudsearch.us-west-2.amazonaws.com',\n+ connection_cls=boto.cloudsearch.layer1.Layer1),\n+ RegionInfo(name='ap-southeast-1',\n+ endpoint='cloudsearch.ap-southeast-1.amazonaws.com',\n+ connection_cls=boto.cloudsearch.layer1.Layer1),\n+\n ]\n", "issue": "Support addtional regions in CloudSearch \nIt appears that boto currently only supports two regions. It would be nice to support the more recently added ones. \n\nCurrent list of CloudSearch regions is here: http://aws.amazon.com/cloudsearch/faqs/#Which_AWS_regions_is_CloudSearch_available_in?\n\nI think it just needs to be added to boto/cloudsearch/**init**.py:\n\nreturn [RegionInfo(name='us-east-1',\n endpoint='cloudsearch.us-east-1.amazonaws.com',\n connection_cls=boto.cloudsearch.layer1.Layer1),\n RegionInfo(name='eu-west-1',\n endpoint='cloudsearch.eu-west-1.amazonaws.com',\n connection_cls=boto.cloudsearch.layer1.Layer1),\n\nAnd yes, you can use another region by changing the default field in .boto \n\n", "before_files": [{"content": "# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.\n# All Rights Reserved\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\nfrom boto.ec2.regioninfo import RegionInfo\n\n\ndef regions():\n \"\"\"\n Get all available regions for the Amazon CloudSearch service.\n\n :rtype: list\n :return: A list of :class:`boto.regioninfo.RegionInfo`\n \"\"\"\n import boto.cloudsearch.layer1\n return [RegionInfo(name='us-east-1',\n endpoint='cloudsearch.us-east-1.amazonaws.com',\n connection_cls=boto.cloudsearch.layer1.Layer1),\n RegionInfo(name='eu-west-1',\n endpoint='cloudsearch.eu-west-1.amazonaws.com',\n connection_cls=boto.cloudsearch.layer1.Layer1),\n ]\n\n\ndef connect_to_region(region_name, **kw_params):\n for region in regions():\n if region.name == region_name:\n return region.connect(**kw_params)\n return None\n", "path": "boto/cloudsearch/__init__.py"}], "after_files": [{"content": "# Copyright (c) 2012 Mitch Garnaat http://garnaat.org/\n# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.\n# All Rights Reserved\n#\n# Permission is hereby granted, free of charge, to any person obtaining a\n# copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish, dis-\n# tribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the fol-\n# lowing conditions:\n#\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\n# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-\n# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT\n# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n#\nfrom boto.ec2.regioninfo import RegionInfo\n\n\ndef regions():\n \"\"\"\n Get all available regions for the Amazon CloudSearch service.\n\n :rtype: list\n :return: A list of :class:`boto.regioninfo.RegionInfo`\n \"\"\"\n import boto.cloudsearch.layer1\n return [RegionInfo(name='us-east-1',\n endpoint='cloudsearch.us-east-1.amazonaws.com',\n connection_cls=boto.cloudsearch.layer1.Layer1),\n RegionInfo(name='eu-west-1',\n endpoint='cloudsearch.eu-west-1.amazonaws.com',\n connection_cls=boto.cloudsearch.layer1.Layer1),\n RegionInfo(name='us-west-1',\n endpoint='cloudsearch.us-west-1.amazonaws.com',\n connection_cls=boto.cloudsearch.layer1.Layer1),\n RegionInfo(name='us-west-2',\n endpoint='cloudsearch.us-west-2.amazonaws.com',\n connection_cls=boto.cloudsearch.layer1.Layer1),\n RegionInfo(name='ap-southeast-1',\n endpoint='cloudsearch.ap-southeast-1.amazonaws.com',\n connection_cls=boto.cloudsearch.layer1.Layer1),\n\n ]\n\n\ndef connect_to_region(region_name, **kw_params):\n for region in regions():\n if region.name == region_name:\n return region.connect(**kw_params)\n return None\n", "path": "boto/cloudsearch/__init__.py"}]} | 964 | 209 |
gh_patches_debug_24204 | rasdani/github-patches | git_diff | ansible-collections__community.general-1110 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
capabilities: libcap-2.4.3 changed output of getcap
<!--- Verify first that your issue is not already reported on GitHub -->
<!--- Also test if the latest release and devel branch are affected too -->
<!--- Complete *all* sections as described, this form is processed automatically -->
##### SUMMARY
getcap output changed in version 2.43 vs. 2.26 of libcap, breaking the community.general.capabilities module
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
capabilities
##### ANSIBLE VERSION
<!--- Paste verbatim output from "ansible --version" between quotes -->
```
ansible 2.10.1
config file = /home/thomas/git/ansible/ansible.cfg
configured module search path = ['/home/thomas/git/ansible/library']
ansible python module location = /home/thomas/.python-venvs/_home_thomas_git_ansible/lib/python3.6/site-packages/ansible
executable location = /home/thomas/.python-venvs/_home_thomas_git_ansible/bin/ansible
python version = 3.6.9 (default, Jul 17 2020, 12:50:27) [GCC 8.4.0]
```
##### CONFIGURATION
<!--- Paste verbatim output from "ansible-config dump --only-changed" between quotes -->
```
CACHE_PLUGIN(/home/thomas/git/ansible/ansible.cfg) = jsonfile
CACHE_PLUGIN_CONNECTION(/home/thomas/git/ansible/ansible.cfg) = factscache
CACHE_PLUGIN_TIMEOUT(/home/thomas/git/ansible/ansible.cfg) = 86400
DEFAULT_BECOME(/home/thomas/git/ansible/ansible.cfg) = True
DEFAULT_BECOME_ASK_PASS(/home/thomas/git/ansible/ansible.cfg) = False
DEFAULT_BECOME_METHOD(/home/thomas/git/ansible/ansible.cfg) = sudo
DEFAULT_BECOME_USER(/home/thomas/git/ansible/ansible.cfg) = root
DEFAULT_CALLBACK_WHITELIST(/home/thomas/git/ansible/ansible.cfg) = ['profile_roles', 'profile_tasks', 'timer']
DEFAULT_FORKS(/home/thomas/git/ansible/ansible.cfg) = 12
DEFAULT_GATHERING(/home/thomas/git/ansible/ansible.cfg) = smart
DEFAULT_HOST_LIST(/home/thomas/git/ansible/ansible.cfg) = ['/home/thomas/git/ansible/inventory/production.py', '/home/thomas/git/ansible/inventory/nameserver_hcloud.yaml']
DEFAULT_MANAGED_STR(/home/thomas/git/ansible/ansible.cfg) = This file is managed by Ansible. Do not edit manually!
DEFAULT_MODULE_PATH(/home/thomas/git/ansible/ansible.cfg) = ['/home/thomas/git/ansible/library']
INTERPRETER_PYTHON(/home/thomas/git/ansible/ansible.cfg) = auto_silent
INVENTORY_ENABLED(/home/thomas/git/ansible/ansible.cfg) = ['script', 'hcloud']
MAX_FILE_SIZE_FOR_DIFF(/home/thomas/git/ansible/ansible.cfg) = 1048576
```
##### OS / ENVIRONMENT
<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->
Host running ansible is Ubuntu 18.04, target system is Gentoo with sys-libs/libcap-2.4.3
##### STEPS TO REPRODUCE
<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->
<!--- Paste example playbooks or commands between quotes below -->
```yaml
- name: ensure capabilities for ping are set
community.general.capabilities:
path: /bin/ping
capability: cap_net_raw+ep
state: present
```
<!--- HINT: You can paste gist.github.com links for larger files -->
##### EXPECTED RESULTS
<!--- Describe what you expected to happen when running the steps above -->
Ansible sets cap_net_raw+ep if it is not already set
##### ACTUAL RESULTS
<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->
Ansible fails getting current capabilities.
<!--- Paste verbatim command output between quotes -->
```paste below
fatal: [server20.tralios.de]: FAILED! => {
"changed": false,
"invocation": {
"module_args": {
"capability": "cap_net_raw+ep",
"path": "/bin/ping",
"state": "present"
}
},
"msg": "Unable to get capabilities of /bin/ping",
"stderr": "",
"stderr_lines": [],
"stdout": "/bin/ping cap_net_raw=ep",
"stdout_lines": [
"/bin/ping cap_net_raw=ep"
]
}
```
##### Additional information
Version 2.26:
```
➜ touch capabilities_test
➜ sudo setcap cap_net_raw+ep capabilities_test
➜ getcap capabilities_test
capabilities_test = cap_net_raw+ep
```
Version 2.43:
```
➜ getcap capabilities_test
capabilities_test cap_net_raw=ep
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/system/capabilities.py`
Content:
```
1 #!/usr/bin/python
2 # -*- coding: utf-8 -*-
3
4 # Copyright: (c) 2014, Nate Coraor <[email protected]>
5 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
6
7 from __future__ import absolute_import, division, print_function
8 __metaclass__ = type
9
10 DOCUMENTATION = r'''
11 ---
12 module: capabilities
13 short_description: Manage Linux capabilities
14 description:
15 - This module manipulates files privileges using the Linux capabilities(7) system.
16 options:
17 path:
18 description:
19 - Specifies the path to the file to be managed.
20 type: str
21 required: yes
22 aliases: [ key ]
23 capability:
24 description:
25 - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))
26 type: str
27 required: yes
28 aliases: [ cap ]
29 state:
30 description:
31 - Whether the entry should be present or absent in the file's capabilities.
32 type: str
33 choices: [ absent, present ]
34 default: present
35 notes:
36 - The capabilities system will automatically transform operators and flags into the effective set,
37 so for example, C(cap_foo=ep) will probably become C(cap_foo+ep).
38 - This module does not attempt to determine the final operator and flags to compare,
39 so you will want to ensure that your capabilities argument matches the final capabilities.
40 author:
41 - Nate Coraor (@natefoo)
42 '''
43
44 EXAMPLES = r'''
45 - name: Set cap_sys_chroot+ep on /foo
46 community.general.capabilities:
47 path: /foo
48 capability: cap_sys_chroot+ep
49 state: present
50
51 - name: Remove cap_net_bind_service from /bar
52 community.general.capabilities:
53 path: /bar
54 capability: cap_net_bind_service
55 state: absent
56 '''
57
58 from ansible.module_utils.basic import AnsibleModule
59
60 OPS = ('=', '-', '+')
61
62
63 class CapabilitiesModule(object):
64 platform = 'Linux'
65 distribution = None
66
67 def __init__(self, module):
68 self.module = module
69 self.path = module.params['path'].strip()
70 self.capability = module.params['capability'].strip().lower()
71 self.state = module.params['state']
72 self.getcap_cmd = module.get_bin_path('getcap', required=True)
73 self.setcap_cmd = module.get_bin_path('setcap', required=True)
74 self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present')
75
76 self.run()
77
78 def run(self):
79
80 current = self.getcap(self.path)
81 caps = [cap[0] for cap in current]
82
83 if self.state == 'present' and self.capability_tup not in current:
84 # need to add capability
85 if self.module.check_mode:
86 self.module.exit_json(changed=True, msg='capabilities changed')
87 else:
88 # remove from current cap list if it's already set (but op/flags differ)
89 current = list(filter(lambda x: x[0] != self.capability_tup[0], current))
90 # add new cap with correct op/flags
91 current.append(self.capability_tup)
92 self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
93 elif self.state == 'absent' and self.capability_tup[0] in caps:
94 # need to remove capability
95 if self.module.check_mode:
96 self.module.exit_json(changed=True, msg='capabilities changed')
97 else:
98 # remove from current cap list and then set current list
99 current = filter(lambda x: x[0] != self.capability_tup[0], current)
100 self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))
101 self.module.exit_json(changed=False, state=self.state)
102
103 def getcap(self, path):
104 rval = []
105 cmd = "%s -v %s" % (self.getcap_cmd, path)
106 rc, stdout, stderr = self.module.run_command(cmd)
107 # If file xattrs are set but no caps are set the output will be:
108 # '/foo ='
109 # If file xattrs are unset the output will be:
110 # '/foo'
111 # If the file does not exist the output will be (with rc == 0...):
112 # '/foo (No such file or directory)'
113 if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):
114 self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
115 if stdout.strip() != path:
116 caps = stdout.split(' =')[1].strip().split()
117 for cap in caps:
118 cap = cap.lower()
119 # getcap condenses capabilities with the same op/flags into a
120 # comma-separated list, so we have to parse that
121 if ',' in cap:
122 cap_group = cap.split(',')
123 cap_group[-1], op, flags = self._parse_cap(cap_group[-1])
124 for subcap in cap_group:
125 rval.append((subcap, op, flags))
126 else:
127 rval.append(self._parse_cap(cap))
128 return rval
129
130 def setcap(self, path, caps):
131 caps = ' '.join([''.join(cap) for cap in caps])
132 cmd = "%s '%s' %s" % (self.setcap_cmd, caps, path)
133 rc, stdout, stderr = self.module.run_command(cmd)
134 if rc != 0:
135 self.module.fail_json(msg="Unable to set capabilities of %s" % path, stdout=stdout, stderr=stderr)
136 else:
137 return stdout
138
139 def _parse_cap(self, cap, op_required=True):
140 opind = -1
141 try:
142 i = 0
143 while opind == -1:
144 opind = cap.find(OPS[i])
145 i += 1
146 except Exception:
147 if op_required:
148 self.module.fail_json(msg="Couldn't find operator (one of: %s)" % str(OPS))
149 else:
150 return (cap, None, None)
151 op = cap[opind]
152 cap, flags = cap.split(op)
153 return (cap, op, flags)
154
155
156 # ==============================================================
157 # main
158
159 def main():
160 # defining module
161 module = AnsibleModule(
162 argument_spec=dict(
163 path=dict(type='str', required=True, aliases=['key']),
164 capability=dict(type='str', required=True, aliases=['cap']),
165 state=dict(type='str', default='present', choices=['absent', 'present']),
166 ),
167 supports_check_mode=True,
168 )
169
170 CapabilitiesModule(module)
171
172
173 if __name__ == '__main__':
174 main()
175
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/plugins/modules/system/capabilities.py b/plugins/modules/system/capabilities.py
--- a/plugins/modules/system/capabilities.py
+++ b/plugins/modules/system/capabilities.py
@@ -108,12 +108,18 @@
# '/foo ='
# If file xattrs are unset the output will be:
# '/foo'
- # If the file does not exist the output will be (with rc == 0...):
+ # If the file does not exist, the stderr will be (with rc == 0...):
# '/foo (No such file or directory)'
- if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):
+ if rc != 0 or stderr != "":
self.module.fail_json(msg="Unable to get capabilities of %s" % path, stdout=stdout.strip(), stderr=stderr)
if stdout.strip() != path:
- caps = stdout.split(' =')[1].strip().split()
+ if ' =' in stdout:
+ # process output of an older version of libcap
+ caps = stdout.split(' =')[1].strip().split()
+ else:
+ # otherwise, we have a newer version here
+ # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git
+ caps = stdout.split()[1].strip().split()
for cap in caps:
cap = cap.lower()
# getcap condenses capabilities with the same op/flags into a
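The new branch only changes how the capability field is pulled out of `getcap`'s stdout. A standalone sketch of that parsing difference, using the two sample outputs from the issue (the helper name is hypothetical, for illustration only):

```python
old_style = "capabilities_test = cap_net_raw+ep"  # libcap 2.26 output from the issue
new_style = "capabilities_test cap_net_raw=ep"    # libcap 2.43 output from the issue


def caps_field(stdout):
    # Mirrors the patched getcap() logic: older libcap separates path and caps
    # with " = ", newer libcap uses plain whitespace and "=" inside the cap itself.
    if ' =' in stdout:
        return stdout.split(' =')[1].strip().split()
    return stdout.split()[1].strip().split()


assert caps_field(old_style) == ['cap_net_raw+ep']
assert caps_field(new_style) == ['cap_net_raw=ep']
```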
| {"golden_diff": "diff --git a/plugins/modules/system/capabilities.py b/plugins/modules/system/capabilities.py\n--- a/plugins/modules/system/capabilities.py\n+++ b/plugins/modules/system/capabilities.py\n@@ -108,12 +108,18 @@\n # '/foo ='\n # If file xattrs are unset the output will be:\n # '/foo'\n- # If the file does not exist the output will be (with rc == 0...):\n+ # If the file does not exist, the stderr will be (with rc == 0...):\n # '/foo (No such file or directory)'\n- if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):\n+ if rc != 0 or stderr != \"\":\n self.module.fail_json(msg=\"Unable to get capabilities of %s\" % path, stdout=stdout.strip(), stderr=stderr)\n if stdout.strip() != path:\n- caps = stdout.split(' =')[1].strip().split()\n+ if ' =' in stdout:\n+ # process output of an older version of libcap\n+ caps = stdout.split(' =')[1].strip().split()\n+ else:\n+ # otherwise, we have a newer version here\n+ # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git\n+ caps = stdout.split()[1].strip().split()\n for cap in caps:\n cap = cap.lower()\n # getcap condenses capabilities with the same op/flags into a\n", "issue": "capabilities: libcap-2.4.3 changed output of getcap\n<!--- Verify first that your issue is not already reported on GitHub -->\r\n<!--- Also test if the latest release and devel branch are affected too -->\r\n<!--- Complete *all* sections as described, this form is processed automatically -->\r\n\r\n##### SUMMARY\r\ngetcap output changed in version 2.43 vs. 2.26 of libcap, breaking community.general.capabilities module\r\n\r\n##### ISSUE TYPE\r\n- Bug Report\r\n\r\n##### COMPONENT NAME\r\ncapabilities\r\n\r\n##### ANSIBLE VERSION\r\n<!--- Paste verbatim output from \"ansible --version\" between quotes -->\r\n```\r\nansible 2.10.1\r\n config file = /home/thomas/git/ansible/ansible.cfg\r\n configured module search path = ['/home/thomas/git/ansible/library']\r\n ansible python module location = /home/thomas/.python-venvs/_home_thomas_git_ansible/lib/python3.6/site-packages/ansible\r\n executable location = /home/thomas/.python-venvs/_home_thomas_git_ansible/bin/ansible\r\n python version = 3.6.9 (default, Jul 17 2020, 12:50:27) [GCC 8.4.0]\r\n```\r\n\r\n##### CONFIGURATION\r\n<!--- Paste verbatim output from \"ansible-config dump --only-changed\" between quotes -->\r\n```\r\nCACHE_PLUGIN(/home/thomas/git/ansible/ansible.cfg) = jsonfile\r\nCACHE_PLUGIN_CONNECTION(/home/thomas/git/ansible/ansible.cfg) = factscache\r\nCACHE_PLUGIN_TIMEOUT(/home/thomas/git/ansible/ansible.cfg) = 86400\r\nDEFAULT_BECOME(/home/thomas/git/ansible/ansible.cfg) = True\r\nDEFAULT_BECOME_ASK_PASS(/home/thomas/git/ansible/ansible.cfg) = False\r\nDEFAULT_BECOME_METHOD(/home/thomas/git/ansible/ansible.cfg) = sudo\r\nDEFAULT_BECOME_USER(/home/thomas/git/ansible/ansible.cfg) = root\r\nDEFAULT_CALLBACK_WHITELIST(/home/thomas/git/ansible/ansible.cfg) = ['profile_roles', 'profile_tasks', 'timer']\r\nDEFAULT_FORKS(/home/thomas/git/ansible/ansible.cfg) = 12\r\nDEFAULT_GATHERING(/home/thomas/git/ansible/ansible.cfg) = smart\r\nDEFAULT_HOST_LIST(/home/thomas/git/ansible/ansible.cfg) = ['/home/thomas/git/ansible/inventory/production.py', '/home/thomas/git/ansible/inventory/nameserver_hcloud.yaml']\r\nDEFAULT_MANAGED_STR(/home/thomas/git/ansible/ansible.cfg) = This file is managed by Ansible. 
Do not edit manually!\r\nDEFAULT_MODULE_PATH(/home/thomas/git/ansible/ansible.cfg) = ['/home/thomas/git/ansible/library']\r\nINTERPRETER_PYTHON(/home/thomas/git/ansible/ansible.cfg) = auto_silent\r\nINVENTORY_ENABLED(/home/thomas/git/ansible/ansible.cfg) = ['script', 'hcloud']\r\nMAX_FILE_SIZE_FOR_DIFF(/home/thomas/git/ansible/ansible.cfg) = 1048576\r\n```\r\n\r\n##### OS / ENVIRONMENT\r\n<!--- Provide all relevant information below, e.g. target OS versions, network device firmware, etc. -->\r\nHost running ansible is Ubuntu 18.04, target system is Gentoo with sys-libs/libcap-2.4.3\r\n\r\n##### STEPS TO REPRODUCE\r\n<!--- Describe exactly how to reproduce the problem, using a minimal test-case -->\r\n\r\n<!--- Paste example playbooks or commands between quotes below -->\r\n```yaml\r\n- name: ensure capabilities for ping are set\r\n community.general.capabilities:\r\n path: /bin/ping\r\n capability: cap_net_raw+ep\r\n state: present\r\n```\r\n\r\n<!--- HINT: You can paste gist.github.com links for larger files -->\r\n\r\n##### EXPECTED RESULTS\r\n<!--- Describe what you expected to happen when running the steps above -->\r\nAnsible sets cap_net_raw+ep if it is not already set\r\n\r\n##### ACTUAL RESULTS\r\n<!--- Describe what actually happened. If possible run with extra verbosity (-vvvv) -->\r\nAnsible fails getting current capabilities.\r\n\r\n<!--- Paste verbatim command output between quotes -->\r\n```paste below\r\nfatal: [server20.tralios.de]: FAILED! => {\r\n \"changed\": false,\r\n \"invocation\": {\r\n \"module_args\": {\r\n \"capability\": \"cap_net_raw+ep\",\r\n \"path\": \"/bin/ping\",\r\n \"state\": \"present\"\r\n }\r\n },\r\n \"msg\": \"Unable to get capabilities of /bin/ping\",\r\n \"stderr\": \"\",\r\n \"stderr_lines\": [],\r\n \"stdout\": \"/bin/ping cap_net_raw=ep\",\r\n \"stdout_lines\": [\r\n \"/bin/ping cap_net_raw=ep\"\r\n ]\r\n}\r\n```\r\n\r\n##### Additional information\r\n\r\nVersion 2.26:\r\n```\r\n\u279c touch capabilities_test\r\n\u279c sudo setcap cap_net_raw+ep capabilities_test\r\n\u279c getcap capabilities_test \r\ncapabilities_test = cap_net_raw+ep\r\n```\r\n\r\nVersion 2.43:\r\n```\r\n\u279c getcap capabilities_test \r\ncapabilities_test cap_net_raw=ep\r\n```\n", "before_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2014, Nate Coraor <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = r'''\n---\nmodule: capabilities\nshort_description: Manage Linux capabilities\ndescription:\n - This module manipulates files privileges using the Linux capabilities(7) system.\noptions:\n path:\n description:\n - Specifies the path to the file to be managed.\n type: str\n required: yes\n aliases: [ key ]\n capability:\n description:\n - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))\n type: str\n required: yes\n aliases: [ cap ]\n state:\n description:\n - Whether the entry should be present or absent in the file's capabilities.\n type: str\n choices: [ absent, present ]\n default: present\nnotes:\n - The capabilities system will automatically transform operators and flags into the effective set,\n so for example, C(cap_foo=ep) will probably become C(cap_foo+ep).\n - This module does not attempt to determine the final operator and flags to compare,\n so you will want to ensure that your capabilities 
argument matches the final capabilities.\nauthor:\n- Nate Coraor (@natefoo)\n'''\n\nEXAMPLES = r'''\n- name: Set cap_sys_chroot+ep on /foo\n community.general.capabilities:\n path: /foo\n capability: cap_sys_chroot+ep\n state: present\n\n- name: Remove cap_net_bind_service from /bar\n community.general.capabilities:\n path: /bar\n capability: cap_net_bind_service\n state: absent\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\n\nOPS = ('=', '-', '+')\n\n\nclass CapabilitiesModule(object):\n platform = 'Linux'\n distribution = None\n\n def __init__(self, module):\n self.module = module\n self.path = module.params['path'].strip()\n self.capability = module.params['capability'].strip().lower()\n self.state = module.params['state']\n self.getcap_cmd = module.get_bin_path('getcap', required=True)\n self.setcap_cmd = module.get_bin_path('setcap', required=True)\n self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present')\n\n self.run()\n\n def run(self):\n\n current = self.getcap(self.path)\n caps = [cap[0] for cap in current]\n\n if self.state == 'present' and self.capability_tup not in current:\n # need to add capability\n if self.module.check_mode:\n self.module.exit_json(changed=True, msg='capabilities changed')\n else:\n # remove from current cap list if it's already set (but op/flags differ)\n current = list(filter(lambda x: x[0] != self.capability_tup[0], current))\n # add new cap with correct op/flags\n current.append(self.capability_tup)\n self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))\n elif self.state == 'absent' and self.capability_tup[0] in caps:\n # need to remove capability\n if self.module.check_mode:\n self.module.exit_json(changed=True, msg='capabilities changed')\n else:\n # remove from current cap list and then set current list\n current = filter(lambda x: x[0] != self.capability_tup[0], current)\n self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))\n self.module.exit_json(changed=False, state=self.state)\n\n def getcap(self, path):\n rval = []\n cmd = \"%s -v %s\" % (self.getcap_cmd, path)\n rc, stdout, stderr = self.module.run_command(cmd)\n # If file xattrs are set but no caps are set the output will be:\n # '/foo ='\n # If file xattrs are unset the output will be:\n # '/foo'\n # If the file does not exist the output will be (with rc == 0...):\n # '/foo (No such file or directory)'\n if rc != 0 or (stdout.strip() != path and stdout.count(' =') != 1):\n self.module.fail_json(msg=\"Unable to get capabilities of %s\" % path, stdout=stdout.strip(), stderr=stderr)\n if stdout.strip() != path:\n caps = stdout.split(' =')[1].strip().split()\n for cap in caps:\n cap = cap.lower()\n # getcap condenses capabilities with the same op/flags into a\n # comma-separated list, so we have to parse that\n if ',' in cap:\n cap_group = cap.split(',')\n cap_group[-1], op, flags = self._parse_cap(cap_group[-1])\n for subcap in cap_group:\n rval.append((subcap, op, flags))\n else:\n rval.append(self._parse_cap(cap))\n return rval\n\n def setcap(self, path, caps):\n caps = ' '.join([''.join(cap) for cap in caps])\n cmd = \"%s '%s' %s\" % (self.setcap_cmd, caps, path)\n rc, stdout, stderr = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg=\"Unable to set capabilities of %s\" % path, stdout=stdout, stderr=stderr)\n else:\n return stdout\n\n def _parse_cap(self, cap, op_required=True):\n 
opind = -1\n try:\n i = 0\n while opind == -1:\n opind = cap.find(OPS[i])\n i += 1\n except Exception:\n if op_required:\n self.module.fail_json(msg=\"Couldn't find operator (one of: %s)\" % str(OPS))\n else:\n return (cap, None, None)\n op = cap[opind]\n cap, flags = cap.split(op)\n return (cap, op, flags)\n\n\n# ==============================================================\n# main\n\ndef main():\n # defining module\n module = AnsibleModule(\n argument_spec=dict(\n path=dict(type='str', required=True, aliases=['key']),\n capability=dict(type='str', required=True, aliases=['cap']),\n state=dict(type='str', default='present', choices=['absent', 'present']),\n ),\n supports_check_mode=True,\n )\n\n CapabilitiesModule(module)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/system/capabilities.py"}], "after_files": [{"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2014, Nate Coraor <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = r'''\n---\nmodule: capabilities\nshort_description: Manage Linux capabilities\ndescription:\n - This module manipulates files privileges using the Linux capabilities(7) system.\noptions:\n path:\n description:\n - Specifies the path to the file to be managed.\n type: str\n required: yes\n aliases: [ key ]\n capability:\n description:\n - Desired capability to set (with operator and flags, if state is C(present)) or remove (if state is C(absent))\n type: str\n required: yes\n aliases: [ cap ]\n state:\n description:\n - Whether the entry should be present or absent in the file's capabilities.\n type: str\n choices: [ absent, present ]\n default: present\nnotes:\n - The capabilities system will automatically transform operators and flags into the effective set,\n so for example, C(cap_foo=ep) will probably become C(cap_foo+ep).\n - This module does not attempt to determine the final operator and flags to compare,\n so you will want to ensure that your capabilities argument matches the final capabilities.\nauthor:\n- Nate Coraor (@natefoo)\n'''\n\nEXAMPLES = r'''\n- name: Set cap_sys_chroot+ep on /foo\n community.general.capabilities:\n path: /foo\n capability: cap_sys_chroot+ep\n state: present\n\n- name: Remove cap_net_bind_service from /bar\n community.general.capabilities:\n path: /bar\n capability: cap_net_bind_service\n state: absent\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\n\nOPS = ('=', '-', '+')\n\n\nclass CapabilitiesModule(object):\n platform = 'Linux'\n distribution = None\n\n def __init__(self, module):\n self.module = module\n self.path = module.params['path'].strip()\n self.capability = module.params['capability'].strip().lower()\n self.state = module.params['state']\n self.getcap_cmd = module.get_bin_path('getcap', required=True)\n self.setcap_cmd = module.get_bin_path('setcap', required=True)\n self.capability_tup = self._parse_cap(self.capability, op_required=self.state == 'present')\n\n self.run()\n\n def run(self):\n\n current = self.getcap(self.path)\n caps = [cap[0] for cap in current]\n\n if self.state == 'present' and self.capability_tup not in current:\n # need to add capability\n if self.module.check_mode:\n self.module.exit_json(changed=True, msg='capabilities changed')\n else:\n # remove from current cap list if it's already set (but op/flags differ)\n current = list(filter(lambda x: x[0] != 
self.capability_tup[0], current))\n # add new cap with correct op/flags\n current.append(self.capability_tup)\n self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))\n elif self.state == 'absent' and self.capability_tup[0] in caps:\n # need to remove capability\n if self.module.check_mode:\n self.module.exit_json(changed=True, msg='capabilities changed')\n else:\n # remove from current cap list and then set current list\n current = filter(lambda x: x[0] != self.capability_tup[0], current)\n self.module.exit_json(changed=True, state=self.state, msg='capabilities changed', stdout=self.setcap(self.path, current))\n self.module.exit_json(changed=False, state=self.state)\n\n def getcap(self, path):\n rval = []\n cmd = \"%s -v %s\" % (self.getcap_cmd, path)\n rc, stdout, stderr = self.module.run_command(cmd)\n # If file xattrs are set but no caps are set the output will be:\n # '/foo ='\n # If file xattrs are unset the output will be:\n # '/foo'\n # If the file does not exist, the stderr will be (with rc == 0...):\n # '/foo (No such file or directory)'\n if rc != 0 or stderr != \"\":\n self.module.fail_json(msg=\"Unable to get capabilities of %s\" % path, stdout=stdout.strip(), stderr=stderr)\n if stdout.strip() != path:\n if ' =' in stdout:\n # process output of an older version of libcap\n caps = stdout.split(' =')[1].strip().split()\n else:\n # otherwise, we have a newer version here\n # see original commit message of cap/v0.2.40-18-g177cd41 in libcap.git\n caps = stdout.split()[1].strip().split()\n for cap in caps:\n cap = cap.lower()\n # getcap condenses capabilities with the same op/flags into a\n # comma-separated list, so we have to parse that\n if ',' in cap:\n cap_group = cap.split(',')\n cap_group[-1], op, flags = self._parse_cap(cap_group[-1])\n for subcap in cap_group:\n rval.append((subcap, op, flags))\n else:\n rval.append(self._parse_cap(cap))\n return rval\n\n def setcap(self, path, caps):\n caps = ' '.join([''.join(cap) for cap in caps])\n cmd = \"%s '%s' %s\" % (self.setcap_cmd, caps, path)\n rc, stdout, stderr = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg=\"Unable to set capabilities of %s\" % path, stdout=stdout, stderr=stderr)\n else:\n return stdout\n\n def _parse_cap(self, cap, op_required=True):\n opind = -1\n try:\n i = 0\n while opind == -1:\n opind = cap.find(OPS[i])\n i += 1\n except Exception:\n if op_required:\n self.module.fail_json(msg=\"Couldn't find operator (one of: %s)\" % str(OPS))\n else:\n return (cap, None, None)\n op = cap[opind]\n cap, flags = cap.split(op)\n return (cap, op, flags)\n\n\n# ==============================================================\n# main\n\ndef main():\n # defining module\n module = AnsibleModule(\n argument_spec=dict(\n path=dict(type='str', required=True, aliases=['key']),\n capability=dict(type='str', required=True, aliases=['cap']),\n state=dict(type='str', default='present', choices=['absent', 'present']),\n ),\n supports_check_mode=True,\n )\n\n CapabilitiesModule(module)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/system/capabilities.py"}]} | 3,277 | 346 |
gh_patches_debug_36577 | rasdani/github-patches | git_diff | geopandas__geopandas-1159 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spatial join error after using `explode()`
@jorisvandenbossche There is a specific situation that produces an error with `.sjoin()`.
I receive the following error:
```
File "C:\Hyapp\Anaconda3\lib\site-packages\pandas\core\indexes\base.py", line 1330, in set_names
raise TypeError("Must pass list-like as `names`.")
TypeError: Must pass list-like as `names`.
```
I was able to narrow this down, and the error only happens after using the `df.explode()` function to turn MultiPolygons into multiple Polygons.
Here is an example to produce the error using some data from OSM:
```
# Test to produce an error
import osmnx as ox
import geopandas as gpd
buildings = ox.footprints_from_place("Punavuori, Finland")
buildings = buildings.explode()
landuse = ox.footprints_from_place("Punavuori, Finland", 'landuse')
join = gpd.sjoin(buildings, landuse)
```
The fix on my side was to call `reset_index()` after using `.explode()`. I am not sure if this is a bug or expected behavior, but I wanted to report my findings anyway. 🙂
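
For anyone hitting the same error, a minimal runnable sketch of that workaround is below. The toy `buildings`/`landuse` frames are illustrative stand-ins for the OSM data above, and `drop=True` is just one way to discard the exploded MultiIndex:

```
import geopandas as gpd
from shapely.geometry import MultiPolygon, Polygon

# Toy stand-ins for the OSM footprints above (names are illustrative).
buildings = gpd.GeoDataFrame(
    {"name": ["a"]},
    geometry=[MultiPolygon([Polygon([(0, 0), (1, 0), (1, 1)]),
                            Polygon([(2, 2), (3, 2), (3, 3)])])],
)
landuse = gpd.GeoDataFrame(
    {"use": ["park"]},
    geometry=[Polygon([(0, 0), (4, 0), (4, 4), (0, 4)])],
)

# explode() returns a (original index, part number) MultiIndex; flattening it
# back to a plain RangeIndex avoids the "Must pass list-like as `names`" error.
buildings = buildings.explode().reset_index(drop=True)

join = gpd.sjoin(buildings, landuse)
```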
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geopandas/tools/sjoin.py`
Content:
```
1 from warnings import warn
2
3 import numpy as np
4 import pandas as pd
5
6 from shapely import prepared
7
8 from geopandas import GeoDataFrame
9
10
11 def sjoin(
12 left_df, right_df, how="inner", op="intersects", lsuffix="left", rsuffix="right"
13 ):
14 """Spatial join of two GeoDataFrames.
15
16 Parameters
17 ----------
18 left_df, right_df : GeoDataFrames
19 how : string, default 'inner'
20 The type of join:
21
22 * 'left': use keys from left_df; retain only left_df geometry column
23 * 'right': use keys from right_df; retain only right_df geometry column
24 * 'inner': use intersection of keys from both dfs; retain only
25 left_df geometry column
26 op : string, default 'intersection'
27 Binary predicate, one of {'intersects', 'contains', 'within'}.
28 See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.
29 lsuffix : string, default 'left'
30 Suffix to apply to overlapping column names (left GeoDataFrame).
31 rsuffix : string, default 'right'
32 Suffix to apply to overlapping column names (right GeoDataFrame).
33
34 """
35 if not isinstance(left_df, GeoDataFrame):
36 raise ValueError(
37 "'left_df' should be GeoDataFrame, got {}".format(type(left_df))
38 )
39
40 if not isinstance(right_df, GeoDataFrame):
41 raise ValueError(
42 "'right_df' should be GeoDataFrame, got {}".format(type(right_df))
43 )
44
45 allowed_hows = ["left", "right", "inner"]
46 if how not in allowed_hows:
47 raise ValueError(
48 '`how` was "%s" but is expected to be in %s' % (how, allowed_hows)
49 )
50
51 allowed_ops = ["contains", "within", "intersects"]
52 if op not in allowed_ops:
53 raise ValueError(
54 '`op` was "%s" but is expected to be in %s' % (op, allowed_ops)
55 )
56
57 if left_df.crs != right_df.crs:
58 warn(
59 (
60 "CRS of frames being joined does not match!"
61 "(%s != %s)" % (left_df.crs, right_df.crs)
62 )
63 )
64
65 index_left = "index_%s" % lsuffix
66 index_right = "index_%s" % rsuffix
67
68 # due to GH 352
69 if any(left_df.columns.isin([index_left, index_right])) or any(
70 right_df.columns.isin([index_left, index_right])
71 ):
72 raise ValueError(
73 "'{0}' and '{1}' cannot be names in the frames being"
74 " joined".format(index_left, index_right)
75 )
76
77 # Attempt to re-use spatial indexes, otherwise generate the spatial index
78 # for the longer dataframe
79 if right_df._sindex_generated or (
80 not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]
81 ):
82 tree_idx = right_df.sindex
83 tree_idx_right = True
84 else:
85 tree_idx = left_df.sindex
86 tree_idx_right = False
87
88 # the rtree spatial index only allows limited (numeric) index types, but an
89 # index in geopandas may be any arbitrary dtype. so reset both indices now
90 # and store references to the original indices, to be reaffixed later.
91 # GH 352
92 left_df = left_df.copy(deep=True)
93 left_index_name = left_df.index.name
94 left_df.index = left_df.index.rename(index_left)
95 left_df = left_df.reset_index()
96 right_df = right_df.copy(deep=True)
97 right_index_name = right_df.index.name
98 right_df.index = right_df.index.rename(index_right)
99 right_df = right_df.reset_index()
100
101 if op == "within":
102 # within implemented as the inverse of contains; swap names
103 left_df, right_df = right_df, left_df
104 tree_idx_right = not tree_idx_right
105
106 r_idx = np.empty((0, 0))
107 l_idx = np.empty((0, 0))
108 # get rtree spatial index
109 if tree_idx_right:
110 idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(
111 lambda x: list(tree_idx.intersection(x)) if not x == () else []
112 )
113 idxmatch = idxmatch[idxmatch.apply(len) > 0]
114 # indexes of overlapping boundaries
115 if idxmatch.shape[0] > 0:
116 r_idx = np.concatenate(idxmatch.values)
117 l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
118 else:
119 # tree_idx_df == 'left'
120 idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(
121 lambda x: list(tree_idx.intersection(x)) if not x == () else []
122 )
123 idxmatch = idxmatch[idxmatch.apply(len) > 0]
124 if idxmatch.shape[0] > 0:
125 # indexes of overlapping boundaries
126 l_idx = np.concatenate(idxmatch.values)
127 r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])
128
129 if len(r_idx) > 0 and len(l_idx) > 0:
130 # Vectorize predicate operations
131 def find_intersects(a1, a2):
132 return a1.intersects(a2)
133
134 def find_contains(a1, a2):
135 return a1.contains(a2)
136
137 predicate_d = {
138 "intersects": find_intersects,
139 "contains": find_contains,
140 "within": find_contains,
141 }
142
143 check_predicates = np.vectorize(predicate_d[op])
144
145 result = pd.DataFrame(
146 np.column_stack(
147 [
148 l_idx,
149 r_idx,
150 check_predicates(
151 left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],
152 right_df[right_df.geometry.name][r_idx],
153 ),
154 ]
155 )
156 )
157
158 result.columns = ["_key_left", "_key_right", "match_bool"]
159 result = pd.DataFrame(result[result["match_bool"] == 1]).drop(
160 "match_bool", axis=1
161 )
162
163 else:
164 # when output from the join has no overlapping geometries
165 result = pd.DataFrame(columns=["_key_left", "_key_right"], dtype=float)
166
167 if op == "within":
168 # within implemented as the inverse of contains; swap names
169 left_df, right_df = right_df, left_df
170 result = result.rename(
171 columns={"_key_left": "_key_right", "_key_right": "_key_left"}
172 )
173
174 if how == "inner":
175 result = result.set_index("_key_left")
176 joined = (
177 left_df.merge(result, left_index=True, right_index=True)
178 .merge(
179 right_df.drop(right_df.geometry.name, axis=1),
180 left_on="_key_right",
181 right_index=True,
182 suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
183 )
184 .set_index(index_left)
185 .drop(["_key_right"], axis=1)
186 )
187 joined.index.name = left_index_name
188
189 elif how == "left":
190 result = result.set_index("_key_left")
191 joined = (
192 left_df.merge(result, left_index=True, right_index=True, how="left")
193 .merge(
194 right_df.drop(right_df.geometry.name, axis=1),
195 how="left",
196 left_on="_key_right",
197 right_index=True,
198 suffixes=("_%s" % lsuffix, "_%s" % rsuffix),
199 )
200 .set_index(index_left)
201 .drop(["_key_right"], axis=1)
202 )
203 joined.index.name = left_index_name
204
205 else: # how == 'right':
206 joined = (
207 left_df.drop(left_df.geometry.name, axis=1)
208 .merge(
209 result.merge(
210 right_df, left_on="_key_right", right_index=True, how="right"
211 ),
212 left_index=True,
213 right_on="_key_left",
214 how="right",
215 )
216 .set_index(index_right)
217 .drop(["_key_left", "_key_right"], axis=1)
218 )
219 joined.index.name = right_index_name
220
221 return joined
222
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/geopandas/tools/sjoin.py b/geopandas/tools/sjoin.py
--- a/geopandas/tools/sjoin.py
+++ b/geopandas/tools/sjoin.py
@@ -90,12 +90,27 @@
# and store references to the original indices, to be reaffixed later.
# GH 352
left_df = left_df.copy(deep=True)
- left_index_name = left_df.index.name
- left_df.index = left_df.index.rename(index_left)
+ try:
+ left_index_name = left_df.index.name
+ left_df.index = left_df.index.rename(index_left)
+ except TypeError:
+ index_left = [
+ "index_%s" % lsuffix + str(l) for l, ix in enumerate(left_df.index.names)
+ ]
+ left_index_name = left_df.index.names
+ left_df.index = left_df.index.rename(index_left)
left_df = left_df.reset_index()
+
right_df = right_df.copy(deep=True)
- right_index_name = right_df.index.name
- right_df.index = right_df.index.rename(index_right)
+ try:
+ right_index_name = right_df.index.name
+ right_df.index = right_df.index.rename(index_right)
+ except TypeError:
+ index_right = [
+ "index_%s" % rsuffix + str(l) for l, ix in enumerate(right_df.index.names)
+ ]
+ right_index_name = right_df.index.names
+ right_df.index = right_df.index.rename(index_right)
right_df = right_df.reset_index()
if op == "within":
@@ -184,7 +199,10 @@
.set_index(index_left)
.drop(["_key_right"], axis=1)
)
- joined.index.name = left_index_name
+ if isinstance(index_left, list):
+ joined.index.names = left_index_name
+ else:
+ joined.index.name = left_index_name
elif how == "left":
result = result.set_index("_key_left")
@@ -200,7 +218,10 @@
.set_index(index_left)
.drop(["_key_right"], axis=1)
)
- joined.index.name = left_index_name
+ if isinstance(index_left, list):
+ joined.index.names = left_index_name
+ else:
+ joined.index.name = left_index_name
else: # how == 'right':
joined = (
@@ -216,6 +237,9 @@
.set_index(index_right)
.drop(["_key_left", "_key_right"], axis=1)
)
- joined.index.name = right_index_name
+ if isinstance(index_right, list):
+ joined.index.names = right_index_name
+ else:
+ joined.index.name = right_index_name
return joined
| {"golden_diff": "diff --git a/geopandas/tools/sjoin.py b/geopandas/tools/sjoin.py\n--- a/geopandas/tools/sjoin.py\n+++ b/geopandas/tools/sjoin.py\n@@ -90,12 +90,27 @@\n # and store references to the original indices, to be reaffixed later.\n # GH 352\n left_df = left_df.copy(deep=True)\n- left_index_name = left_df.index.name\n- left_df.index = left_df.index.rename(index_left)\n+ try:\n+ left_index_name = left_df.index.name\n+ left_df.index = left_df.index.rename(index_left)\n+ except TypeError:\n+ index_left = [\n+ \"index_%s\" % lsuffix + str(l) for l, ix in enumerate(left_df.index.names)\n+ ]\n+ left_index_name = left_df.index.names\n+ left_df.index = left_df.index.rename(index_left)\n left_df = left_df.reset_index()\n+\n right_df = right_df.copy(deep=True)\n- right_index_name = right_df.index.name\n- right_df.index = right_df.index.rename(index_right)\n+ try:\n+ right_index_name = right_df.index.name\n+ right_df.index = right_df.index.rename(index_right)\n+ except TypeError:\n+ index_right = [\n+ \"index_%s\" % rsuffix + str(l) for l, ix in enumerate(right_df.index.names)\n+ ]\n+ right_index_name = right_df.index.names\n+ right_df.index = right_df.index.rename(index_right)\n right_df = right_df.reset_index()\n \n if op == \"within\":\n@@ -184,7 +199,10 @@\n .set_index(index_left)\n .drop([\"_key_right\"], axis=1)\n )\n- joined.index.name = left_index_name\n+ if isinstance(index_left, list):\n+ joined.index.names = left_index_name\n+ else:\n+ joined.index.name = left_index_name\n \n elif how == \"left\":\n result = result.set_index(\"_key_left\")\n@@ -200,7 +218,10 @@\n .set_index(index_left)\n .drop([\"_key_right\"], axis=1)\n )\n- joined.index.name = left_index_name\n+ if isinstance(index_left, list):\n+ joined.index.names = left_index_name\n+ else:\n+ joined.index.name = left_index_name\n \n else: # how == 'right':\n joined = (\n@@ -216,6 +237,9 @@\n .set_index(index_right)\n .drop([\"_key_left\", \"_key_right\"], axis=1)\n )\n- joined.index.name = right_index_name\n+ if isinstance(index_right, list):\n+ joined.index.names = right_index_name\n+ else:\n+ joined.index.name = right_index_name\n \n return joined\n", "issue": "Spatial join error after using `explode()`\n@jorisvandenbossche There is a specific situation that produces an error with `.sjoin()`. \r\n\r\nI receive a following error: \r\n```\r\n File \"C:\\Hyapp\\Anaconda3\\lib\\site-packages\\pandas\\core\\indexes\\base.py\", line 1330, in set_names\r\n raise TypeError(\"Must pass list-like as `names`.\")\r\n\r\nTypeError: Must pass list-like as `names`.\r\n```\r\n\r\nI was able to narrow this down, and the error only happens after using `df.explode()` function to turn MultiPolygons into multiple Polygons. \r\n\r\nHere is an example to produce the error using some data from OSM: \r\n```\r\n# Test to produce an error\r\nimport osmnx as ox\r\nimport geopandas as gpd\r\nbuildings = ox.footprints_from_place(\"Punavuori, Finland\")\r\nbuildings = buildings.explode()\r\nlanduse = ox.footprints_from_place(\"Punavuori, Finland\", 'landuse')\r\njoin = gpd.sjoin(buildings, landuse)\r\n```\r\n\r\nThe solution for fixing this was to `reset_index()` after using the `.explode()`. I am not sure if this is a bug or expected behavior, but wanted to report my findings anyway. 
\ud83d\ude42 \r\n\n", "before_files": [{"content": "from warnings import warn\n\nimport numpy as np\nimport pandas as pd\n\nfrom shapely import prepared\n\nfrom geopandas import GeoDataFrame\n\n\ndef sjoin(\n left_df, right_df, how=\"inner\", op=\"intersects\", lsuffix=\"left\", rsuffix=\"right\"\n):\n \"\"\"Spatial join of two GeoDataFrames.\n\n Parameters\n ----------\n left_df, right_df : GeoDataFrames\n how : string, default 'inner'\n The type of join:\n\n * 'left': use keys from left_df; retain only left_df geometry column\n * 'right': use keys from right_df; retain only right_df geometry column\n * 'inner': use intersection of keys from both dfs; retain only\n left_df geometry column\n op : string, default 'intersection'\n Binary predicate, one of {'intersects', 'contains', 'within'}.\n See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n\n \"\"\"\n if not isinstance(left_df, GeoDataFrame):\n raise ValueError(\n \"'left_df' should be GeoDataFrame, got {}\".format(type(left_df))\n )\n\n if not isinstance(right_df, GeoDataFrame):\n raise ValueError(\n \"'right_df' should be GeoDataFrame, got {}\".format(type(right_df))\n )\n\n allowed_hows = [\"left\", \"right\", \"inner\"]\n if how not in allowed_hows:\n raise ValueError(\n '`how` was \"%s\" but is expected to be in %s' % (how, allowed_hows)\n )\n\n allowed_ops = [\"contains\", \"within\", \"intersects\"]\n if op not in allowed_ops:\n raise ValueError(\n '`op` was \"%s\" but is expected to be in %s' % (op, allowed_ops)\n )\n\n if left_df.crs != right_df.crs:\n warn(\n (\n \"CRS of frames being joined does not match!\"\n \"(%s != %s)\" % (left_df.crs, right_df.crs)\n )\n )\n\n index_left = \"index_%s\" % lsuffix\n index_right = \"index_%s\" % rsuffix\n\n # due to GH 352\n if any(left_df.columns.isin([index_left, index_right])) or any(\n right_df.columns.isin([index_left, index_right])\n ):\n raise ValueError(\n \"'{0}' and '{1}' cannot be names in the frames being\"\n \" joined\".format(index_left, index_right)\n )\n\n # Attempt to re-use spatial indexes, otherwise generate the spatial index\n # for the longer dataframe\n if right_df._sindex_generated or (\n not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]\n ):\n tree_idx = right_df.sindex\n tree_idx_right = True\n else:\n tree_idx = left_df.sindex\n tree_idx_right = False\n\n # the rtree spatial index only allows limited (numeric) index types, but an\n # index in geopandas may be any arbitrary dtype. 
so reset both indices now\n # and store references to the original indices, to be reaffixed later.\n # GH 352\n left_df = left_df.copy(deep=True)\n left_index_name = left_df.index.name\n left_df.index = left_df.index.rename(index_left)\n left_df = left_df.reset_index()\n right_df = right_df.copy(deep=True)\n right_index_name = right_df.index.name\n right_df.index = right_df.index.rename(index_right)\n right_df = right_df.reset_index()\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n tree_idx_right = not tree_idx_right\n\n r_idx = np.empty((0, 0))\n l_idx = np.empty((0, 0))\n # get rtree spatial index\n if tree_idx_right:\n idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n # indexes of overlapping boundaries\n if idxmatch.shape[0] > 0:\n r_idx = np.concatenate(idxmatch.values)\n l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n else:\n # tree_idx_df == 'left'\n idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n if idxmatch.shape[0] > 0:\n # indexes of overlapping boundaries\n l_idx = np.concatenate(idxmatch.values)\n r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n\n if len(r_idx) > 0 and len(l_idx) > 0:\n # Vectorize predicate operations\n def find_intersects(a1, a2):\n return a1.intersects(a2)\n\n def find_contains(a1, a2):\n return a1.contains(a2)\n\n predicate_d = {\n \"intersects\": find_intersects,\n \"contains\": find_contains,\n \"within\": find_contains,\n }\n\n check_predicates = np.vectorize(predicate_d[op])\n\n result = pd.DataFrame(\n np.column_stack(\n [\n l_idx,\n r_idx,\n check_predicates(\n left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],\n right_df[right_df.geometry.name][r_idx],\n ),\n ]\n )\n )\n\n result.columns = [\"_key_left\", \"_key_right\", \"match_bool\"]\n result = pd.DataFrame(result[result[\"match_bool\"] == 1]).drop(\n \"match_bool\", axis=1\n )\n\n else:\n # when output from the join has no overlapping geometries\n result = pd.DataFrame(columns=[\"_key_left\", \"_key_right\"], dtype=float)\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n result = result.rename(\n columns={\"_key_left\": \"_key_right\", \"_key_right\": \"_key_left\"}\n )\n\n if how == \"inner\":\n result = result.set_index(\"_key_left\")\n joined = (\n left_df.merge(result, left_index=True, right_index=True)\n .merge(\n right_df.drop(right_df.geometry.name, axis=1),\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n .set_index(index_left)\n .drop([\"_key_right\"], axis=1)\n )\n joined.index.name = left_index_name\n\n elif how == \"left\":\n result = result.set_index(\"_key_left\")\n joined = (\n left_df.merge(result, left_index=True, right_index=True, how=\"left\")\n .merge(\n right_df.drop(right_df.geometry.name, axis=1),\n how=\"left\",\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n .set_index(index_left)\n .drop([\"_key_right\"], axis=1)\n )\n joined.index.name = left_index_name\n\n else: # how == 'right':\n joined = (\n left_df.drop(left_df.geometry.name, axis=1)\n .merge(\n result.merge(\n right_df, 
left_on=\"_key_right\", right_index=True, how=\"right\"\n ),\n left_index=True,\n right_on=\"_key_left\",\n how=\"right\",\n )\n .set_index(index_right)\n .drop([\"_key_left\", \"_key_right\"], axis=1)\n )\n joined.index.name = right_index_name\n\n return joined\n", "path": "geopandas/tools/sjoin.py"}], "after_files": [{"content": "from warnings import warn\n\nimport numpy as np\nimport pandas as pd\n\nfrom shapely import prepared\n\nfrom geopandas import GeoDataFrame\n\n\ndef sjoin(\n left_df, right_df, how=\"inner\", op=\"intersects\", lsuffix=\"left\", rsuffix=\"right\"\n):\n \"\"\"Spatial join of two GeoDataFrames.\n\n Parameters\n ----------\n left_df, right_df : GeoDataFrames\n how : string, default 'inner'\n The type of join:\n\n * 'left': use keys from left_df; retain only left_df geometry column\n * 'right': use keys from right_df; retain only right_df geometry column\n * 'inner': use intersection of keys from both dfs; retain only\n left_df geometry column\n op : string, default 'intersection'\n Binary predicate, one of {'intersects', 'contains', 'within'}.\n See http://shapely.readthedocs.io/en/latest/manual.html#binary-predicates.\n lsuffix : string, default 'left'\n Suffix to apply to overlapping column names (left GeoDataFrame).\n rsuffix : string, default 'right'\n Suffix to apply to overlapping column names (right GeoDataFrame).\n\n \"\"\"\n if not isinstance(left_df, GeoDataFrame):\n raise ValueError(\n \"'left_df' should be GeoDataFrame, got {}\".format(type(left_df))\n )\n\n if not isinstance(right_df, GeoDataFrame):\n raise ValueError(\n \"'right_df' should be GeoDataFrame, got {}\".format(type(right_df))\n )\n\n allowed_hows = [\"left\", \"right\", \"inner\"]\n if how not in allowed_hows:\n raise ValueError(\n '`how` was \"%s\" but is expected to be in %s' % (how, allowed_hows)\n )\n\n allowed_ops = [\"contains\", \"within\", \"intersects\"]\n if op not in allowed_ops:\n raise ValueError(\n '`op` was \"%s\" but is expected to be in %s' % (op, allowed_ops)\n )\n\n if left_df.crs != right_df.crs:\n warn(\n (\n \"CRS of frames being joined does not match!\"\n \"(%s != %s)\" % (left_df.crs, right_df.crs)\n )\n )\n\n index_left = \"index_%s\" % lsuffix\n index_right = \"index_%s\" % rsuffix\n\n # due to GH 352\n if any(left_df.columns.isin([index_left, index_right])) or any(\n right_df.columns.isin([index_left, index_right])\n ):\n raise ValueError(\n \"'{0}' and '{1}' cannot be names in the frames being\"\n \" joined\".format(index_left, index_right)\n )\n\n # Attempt to re-use spatial indexes, otherwise generate the spatial index\n # for the longer dataframe\n if right_df._sindex_generated or (\n not left_df._sindex_generated and right_df.shape[0] > left_df.shape[0]\n ):\n tree_idx = right_df.sindex\n tree_idx_right = True\n else:\n tree_idx = left_df.sindex\n tree_idx_right = False\n\n # the rtree spatial index only allows limited (numeric) index types, but an\n # index in geopandas may be any arbitrary dtype. 
so reset both indices now\n # and store references to the original indices, to be reaffixed later.\n # GH 352\n left_df = left_df.copy(deep=True)\n try:\n left_index_name = left_df.index.name\n left_df.index = left_df.index.rename(index_left)\n except TypeError:\n index_left = [\n \"index_%s\" % lsuffix + str(l) for l, ix in enumerate(left_df.index.names)\n ]\n left_index_name = left_df.index.names\n left_df.index = left_df.index.rename(index_left)\n left_df = left_df.reset_index()\n\n right_df = right_df.copy(deep=True)\n try:\n right_index_name = right_df.index.name\n right_df.index = right_df.index.rename(index_right)\n except TypeError:\n index_right = [\n \"index_%s\" % rsuffix + str(l) for l, ix in enumerate(right_df.index.names)\n ]\n right_index_name = right_df.index.names\n right_df.index = right_df.index.rename(index_right)\n right_df = right_df.reset_index()\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n tree_idx_right = not tree_idx_right\n\n r_idx = np.empty((0, 0))\n l_idx = np.empty((0, 0))\n # get rtree spatial index\n if tree_idx_right:\n idxmatch = left_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n # indexes of overlapping boundaries\n if idxmatch.shape[0] > 0:\n r_idx = np.concatenate(idxmatch.values)\n l_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n else:\n # tree_idx_df == 'left'\n idxmatch = right_df.geometry.apply(lambda x: x.bounds).apply(\n lambda x: list(tree_idx.intersection(x)) if not x == () else []\n )\n idxmatch = idxmatch[idxmatch.apply(len) > 0]\n if idxmatch.shape[0] > 0:\n # indexes of overlapping boundaries\n l_idx = np.concatenate(idxmatch.values)\n r_idx = np.concatenate([[i] * len(v) for i, v in idxmatch.iteritems()])\n\n if len(r_idx) > 0 and len(l_idx) > 0:\n # Vectorize predicate operations\n def find_intersects(a1, a2):\n return a1.intersects(a2)\n\n def find_contains(a1, a2):\n return a1.contains(a2)\n\n predicate_d = {\n \"intersects\": find_intersects,\n \"contains\": find_contains,\n \"within\": find_contains,\n }\n\n check_predicates = np.vectorize(predicate_d[op])\n\n result = pd.DataFrame(\n np.column_stack(\n [\n l_idx,\n r_idx,\n check_predicates(\n left_df.geometry.apply(lambda x: prepared.prep(x))[l_idx],\n right_df[right_df.geometry.name][r_idx],\n ),\n ]\n )\n )\n\n result.columns = [\"_key_left\", \"_key_right\", \"match_bool\"]\n result = pd.DataFrame(result[result[\"match_bool\"] == 1]).drop(\n \"match_bool\", axis=1\n )\n\n else:\n # when output from the join has no overlapping geometries\n result = pd.DataFrame(columns=[\"_key_left\", \"_key_right\"], dtype=float)\n\n if op == \"within\":\n # within implemented as the inverse of contains; swap names\n left_df, right_df = right_df, left_df\n result = result.rename(\n columns={\"_key_left\": \"_key_right\", \"_key_right\": \"_key_left\"}\n )\n\n if how == \"inner\":\n result = result.set_index(\"_key_left\")\n joined = (\n left_df.merge(result, left_index=True, right_index=True)\n .merge(\n right_df.drop(right_df.geometry.name, axis=1),\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n .set_index(index_left)\n .drop([\"_key_right\"], axis=1)\n )\n if isinstance(index_left, list):\n joined.index.names = left_index_name\n else:\n joined.index.name = left_index_name\n\n elif how == \"left\":\n result = 
result.set_index(\"_key_left\")\n joined = (\n left_df.merge(result, left_index=True, right_index=True, how=\"left\")\n .merge(\n right_df.drop(right_df.geometry.name, axis=1),\n how=\"left\",\n left_on=\"_key_right\",\n right_index=True,\n suffixes=(\"_%s\" % lsuffix, \"_%s\" % rsuffix),\n )\n .set_index(index_left)\n .drop([\"_key_right\"], axis=1)\n )\n if isinstance(index_left, list):\n joined.index.names = left_index_name\n else:\n joined.index.name = left_index_name\n\n else: # how == 'right':\n joined = (\n left_df.drop(left_df.geometry.name, axis=1)\n .merge(\n result.merge(\n right_df, left_on=\"_key_right\", right_index=True, how=\"right\"\n ),\n left_index=True,\n right_on=\"_key_left\",\n how=\"right\",\n )\n .set_index(index_right)\n .drop([\"_key_left\", \"_key_right\"], axis=1)\n )\n if isinstance(index_right, list):\n joined.index.names = right_index_name\n else:\n joined.index.name = right_index_name\n\n return joined\n", "path": "geopandas/tools/sjoin.py"}]} | 2,903 | 632 |
gh_patches_debug_25584 | rasdani/github-patches | git_diff | openvinotoolkit__datumaro-375 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot import captions with double quotes ICDAR Word Recognition
The original ICDAR 13/15 dataset for the word recognition task contains captions with double quotes (e.g. "READER\\"").
When I try to load this dataset, Datumaro throws an exception with the message:
`Line word_136.png, "(412\")": unexpected number of quotes in filename`.
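
For context, the offending annotation lines look roughly like the hypothetical example below; one tolerant way to recover the caption is to strip the wrapping quotes and backslash escapes instead of splitting the line on every `"` (this is only a sketch, not the actual Datumaro parser):

```
# Hypothetical gt.txt line: the transcription ends with an escaped quote, so
# splitting the caption part on '"' gives an even number of pieces and the
# naive quote count raises the exception quoted above.
line = 'word_136.png, "READER\\""'

image, _, caption = line.strip().partition(', ')
if caption.startswith('"') and caption.endswith('"'):
    # Drop the wrapping quotes and any backslash escapes inside the caption.
    caption = caption[1:-1].replace('\\', '')

print(image)    # word_136.png
print(caption)  # READER"
```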
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `datumaro/plugins/icdar_format/extractor.py`
Content:
```
1 # Copyright (C) 2020 Intel Corporation
2 #
3 # SPDX-License-Identifier: MIT
4
5 from glob import iglob
6 import os.path as osp
7
8 import numpy as np
9
10 from datumaro.components.extractor import (
11 Bbox, Caption, DatasetItem, Importer, Mask, MaskCategories, Polygon,
12 SourceExtractor,
13 )
14 from datumaro.util.image import find_images
15 from datumaro.util.mask_tools import lazy_mask
16
17 from .format import IcdarPath, IcdarTask
18
19
20 class _IcdarExtractor(SourceExtractor):
21 def __init__(self, path, task, subset=None):
22 self._path = path
23 self._task = task
24
25 if task is IcdarTask.word_recognition:
26 if not osp.isfile(path):
27 raise FileNotFoundError(
28 "Can't read annotation file '%s'" % path)
29
30 if not subset:
31 subset = osp.basename(osp.dirname(path))
32 super().__init__(subset=subset)
33
34 self._dataset_dir = osp.dirname(osp.dirname(path))
35
36 self._items = list(self._load_recognition_items().values())
37 elif task in {IcdarTask.text_localization, IcdarTask.text_segmentation}:
38 if not osp.isdir(path):
39 raise NotADirectoryError(
40 "Can't open folder with annotation files '%s'" % path)
41
42 if not subset:
43 subset = osp.basename(path)
44 super().__init__(subset=subset)
45
46 self._dataset_dir = osp.dirname(path)
47
48 if task is IcdarTask.text_localization:
49 self._items = list(self._load_localization_items().values())
50 else:
51 self._items = list(self._load_segmentation_items().values())
52
53 def _load_recognition_items(self):
54 items = {}
55
56 with open(self._path, encoding='utf-8') as f:
57 for line in f:
58 line = line.strip()
59 objects = line.split(', ')
60 if len(objects) == 2:
61 image = objects[0]
62 objects = objects[1].split('\"')
63 if 1 < len(objects):
64 if len(objects) % 2:
65 captions = [objects[2 * i + 1]
66 for i in range(int(len(objects) / 2))]
67 else:
68 raise Exception("Line %s: unexpected number "
69 "of quotes in filename" % line)
70 else:
71 captions = objects[0].split()
72 else:
73 image = objects[0][:-1]
74 captions = []
75
76 item_id = osp.splitext(image)[0]
77 image_path = osp.join(osp.dirname(self._path),
78 IcdarPath.IMAGES_DIR, image)
79 if item_id not in items:
80 items[item_id] = DatasetItem(item_id, subset=self._subset,
81 image=image_path)
82
83 annotations = items[item_id].annotations
84 for caption in captions:
85 annotations.append(Caption(caption))
86
87 return items
88
89 def _load_localization_items(self):
90 items = {}
91
92 image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)
93 if osp.isdir(image_dir):
94 images = { osp.splitext(osp.relpath(p, image_dir))[0]: p
95 for p in find_images(image_dir, recursive=True) }
96 else:
97 images = {}
98
99 for path in iglob(osp.join(self._path, '**', '*.txt'), recursive=True):
100 item_id = osp.splitext(osp.relpath(path, self._path))[0]
101 if osp.basename(item_id).startswith('gt_'):
102 item_id = osp.join(osp.dirname(item_id), osp.basename(item_id)[3:])
103 item_id = item_id.replace('\\', '/')
104
105 if item_id not in items:
106 items[item_id] = DatasetItem(item_id, subset=self._subset,
107 image=images.get(item_id))
108 annotations = items[item_id].annotations
109
110 with open(path, encoding='utf-8') as f:
111 for line in f:
112 line = line.strip()
113 objects = line.split('\"')
114 if 1 < len(objects):
115 if len(objects) == 3:
116 text = objects[1]
117 else:
118 raise Exception("Line %s: unexpected number "
119 "of quotes in filename" % line)
120 else:
121 text = ''
122 objects = objects[0].split()
123 if len(objects) == 1:
124 objects = objects[0].split(',')
125
126 if 8 <= len(objects):
127 points = [float(p) for p in objects[:8]]
128
129 attributes = {}
130 if 0 < len(text):
131 attributes['text'] = text
132 elif len(objects) == 9:
133 text = objects[8]
134 attributes['text'] = text
135
136 annotations.append(
137 Polygon(points, attributes=attributes))
138 elif 4 <= len(objects):
139 x = float(objects[0])
140 y = float(objects[1])
141 w = float(objects[2]) - x
142 h = float(objects[3]) - y
143
144 attributes = {}
145 if 0 < len(text):
146 attributes['text'] = text
147 elif len(objects) == 5:
148 text = objects[4]
149 attributes['text'] = text
150
151 annotations.append(
152 Bbox(x, y, w, h, attributes=attributes))
153 return items
154
155 def _load_segmentation_items(self):
156 items = {}
157
158 image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)
159 if osp.isdir(image_dir):
160 images = { osp.splitext(osp.relpath(p, image_dir))[0]: p
161 for p in find_images(image_dir, recursive=True) }
162 else:
163 images = {}
164
165 for path in iglob(osp.join(self._path, '**', '*.txt'), recursive=True):
166 item_id = osp.splitext(osp.relpath(path, self._path))[0]
167 item_id = item_id.replace('\\', '/')
168 if item_id.endswith('_GT'):
169 item_id = item_id[:-3]
170
171 if item_id not in items:
172 items[item_id] = DatasetItem(item_id, subset=self._subset,
173 image=images.get(item_id))
174 annotations = items[item_id].annotations
175
176 colors = [(255, 255, 255)]
177 chars = ['']
178 centers = [0]
179 groups = [0]
180 group = 1
181 number_in_group = 0
182 with open(path, encoding='utf-8') as f:
183 for line in f:
184 line = line.strip()
185 if line == '':
186 if number_in_group == 1:
187 groups[len(groups) - 1] = 0
188 else:
189 group += 1
190 number_in_group = 0
191 continue
192
193 objects = line.split()
194 if objects[0][0] == '#':
195 objects[0] = objects[0][1:]
196 objects[9] = '\" \"'
197 objects.pop()
198 if len(objects) != 10:
199 raise Exception("Line %s contains the wrong number "
200 "of arguments, e.g. '241 73 144 1 4 0 3 1 4 \"h\"" % line)
201
202 centers.append(objects[3] + ' ' + objects[4])
203 groups.append(group)
204 colors.append(tuple(int(o) for o in objects[:3]))
205 char = objects[9]
206 if char[0] == '\"' and char[-1] == '\"':
207 char = char[1:-1]
208 chars.append(char)
209 number_in_group += 1
210 if number_in_group == 1:
211 groups[len(groups) - 1] = 0
212
213 mask_categories = MaskCategories(
214 {i: colors[i] for i in range(len(colors))})
215 inverse_cls_colormap = mask_categories.inverse_colormap
216
217 gt_path = osp.join(self._path, item_id + '_GT' + IcdarPath.GT_EXT)
218 if osp.isfile(gt_path):
219 # load mask through cache
220 mask = lazy_mask(gt_path, inverse_cls_colormap)
221 mask = mask()
222
223 classes = np.unique(mask)
224 for label_id in classes:
225 if label_id == 0:
226 continue
227 i = int(label_id)
228 annotations.append(Mask(group=groups[i],
229 image=self._lazy_extract_mask(mask, label_id),
230 attributes={ 'index': i - 1,
231 'color': ' '.join(str(p) for p in colors[i]),
232 'text': chars[i], 'center': centers[i] }
233 ))
234 return items
235
236 @staticmethod
237 def _lazy_extract_mask(mask, c):
238 return lambda: mask == c
239
240 class IcdarWordRecognitionExtractor(_IcdarExtractor):
241 def __init__(self, path, **kwargs):
242 kwargs['task'] = IcdarTask.word_recognition
243 super().__init__(path, **kwargs)
244
245 class IcdarTextLocalizationExtractor(_IcdarExtractor):
246 def __init__(self, path, **kwargs):
247 kwargs['task'] = IcdarTask.text_localization
248 super().__init__(path, **kwargs)
249
250 class IcdarTextSegmentationExtractor(_IcdarExtractor):
251 def __init__(self, path, **kwargs):
252 kwargs['task'] = IcdarTask.text_segmentation
253 super().__init__(path, **kwargs)
254
255
256 class IcdarWordRecognitionImporter(Importer):
257 @classmethod
258 def find_sources(cls, path):
259 return cls._find_sources_recursive(path, '.txt', 'icdar_word_recognition')
260
261 class IcdarTextLocalizationImporter(Importer):
262 @classmethod
263 def find_sources(cls, path):
264 return cls._find_sources_recursive(path, '', 'icdar_text_localization')
265
266 class IcdarTextSegmentationImporter(Importer):
267 @classmethod
268 def find_sources(cls, path):
269 return cls._find_sources_recursive(path, '', 'icdar_text_segmentation')
270
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/datumaro/plugins/icdar_format/extractor.py b/datumaro/plugins/icdar_format/extractor.py
--- a/datumaro/plugins/icdar_format/extractor.py
+++ b/datumaro/plugins/icdar_format/extractor.py
@@ -3,6 +3,7 @@
# SPDX-License-Identifier: MIT
from glob import iglob
+import logging as log
import os.path as osp
import numpy as np
@@ -59,16 +60,13 @@
objects = line.split(', ')
if len(objects) == 2:
image = objects[0]
- objects = objects[1].split('\"')
- if 1 < len(objects):
- if len(objects) % 2:
- captions = [objects[2 * i + 1]
- for i in range(int(len(objects) / 2))]
+ captions = []
+ for caption in objects[1:]:
+ if caption[0] != '\"' or caption[-1] != '\"':
+ log.warning("Line %s: unexpected number "
+ "of quotes" % line)
else:
- raise Exception("Line %s: unexpected number "
- "of quotes in filename" % line)
- else:
- captions = objects[0].split()
+ captions.append(caption.replace('\\', '')[1:-1])
else:
image = objects[0][:-1]
captions = []
| {"golden_diff": "diff --git a/datumaro/plugins/icdar_format/extractor.py b/datumaro/plugins/icdar_format/extractor.py\n--- a/datumaro/plugins/icdar_format/extractor.py\n+++ b/datumaro/plugins/icdar_format/extractor.py\n@@ -3,6 +3,7 @@\n # SPDX-License-Identifier: MIT\n \n from glob import iglob\n+import logging as log\n import os.path as osp\n \n import numpy as np\n@@ -59,16 +60,13 @@\n objects = line.split(', ')\n if len(objects) == 2:\n image = objects[0]\n- objects = objects[1].split('\\\"')\n- if 1 < len(objects):\n- if len(objects) % 2:\n- captions = [objects[2 * i + 1]\n- for i in range(int(len(objects) / 2))]\n+ captions = []\n+ for caption in objects[1:]:\n+ if caption[0] != '\\\"' or caption[-1] != '\\\"':\n+ log.warning(\"Line %s: unexpected number \"\n+ \"of quotes\" % line)\n else:\n- raise Exception(\"Line %s: unexpected number \"\n- \"of quotes in filename\" % line)\n- else:\n- captions = objects[0].split()\n+ captions.append(caption.replace('\\\\', '')[1:-1])\n else:\n image = objects[0][:-1]\n captions = []\n", "issue": "Cannot import captions with double quotes ICDAR Word Recognition\nThe original ICDAR 13/15 dataset for word recognition task contains captions with double quotes (e.g \"READER\\\\\"\").\r\nWhen I try to load this dataset, Datumaro throws an exception with the message:\r\n`Line word_136.png, \"(412\\\")\": unexpected number of quotes in filename`.\n", "before_files": [{"content": "# Copyright (C) 2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom glob import iglob\nimport os.path as osp\n\nimport numpy as np\n\nfrom datumaro.components.extractor import (\n Bbox, Caption, DatasetItem, Importer, Mask, MaskCategories, Polygon,\n SourceExtractor,\n)\nfrom datumaro.util.image import find_images\nfrom datumaro.util.mask_tools import lazy_mask\n\nfrom .format import IcdarPath, IcdarTask\n\n\nclass _IcdarExtractor(SourceExtractor):\n def __init__(self, path, task, subset=None):\n self._path = path\n self._task = task\n\n if task is IcdarTask.word_recognition:\n if not osp.isfile(path):\n raise FileNotFoundError(\n \"Can't read annotation file '%s'\" % path)\n\n if not subset:\n subset = osp.basename(osp.dirname(path))\n super().__init__(subset=subset)\n\n self._dataset_dir = osp.dirname(osp.dirname(path))\n\n self._items = list(self._load_recognition_items().values())\n elif task in {IcdarTask.text_localization, IcdarTask.text_segmentation}:\n if not osp.isdir(path):\n raise NotADirectoryError(\n \"Can't open folder with annotation files '%s'\" % path)\n\n if not subset:\n subset = osp.basename(path)\n super().__init__(subset=subset)\n\n self._dataset_dir = osp.dirname(path)\n\n if task is IcdarTask.text_localization:\n self._items = list(self._load_localization_items().values())\n else:\n self._items = list(self._load_segmentation_items().values())\n\n def _load_recognition_items(self):\n items = {}\n\n with open(self._path, encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n objects = line.split(', ')\n if len(objects) == 2:\n image = objects[0]\n objects = objects[1].split('\\\"')\n if 1 < len(objects):\n if len(objects) % 2:\n captions = [objects[2 * i + 1]\n for i in range(int(len(objects) / 2))]\n else:\n raise Exception(\"Line %s: unexpected number \"\n \"of quotes in filename\" % line)\n else:\n captions = objects[0].split()\n else:\n image = objects[0][:-1]\n captions = []\n\n item_id = osp.splitext(image)[0]\n image_path = osp.join(osp.dirname(self._path),\n IcdarPath.IMAGES_DIR, image)\n if item_id not in items:\n 
items[item_id] = DatasetItem(item_id, subset=self._subset,\n image=image_path)\n\n annotations = items[item_id].annotations\n for caption in captions:\n annotations.append(Caption(caption))\n\n return items\n\n def _load_localization_items(self):\n items = {}\n\n image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)\n if osp.isdir(image_dir):\n images = { osp.splitext(osp.relpath(p, image_dir))[0]: p\n for p in find_images(image_dir, recursive=True) }\n else:\n images = {}\n\n for path in iglob(osp.join(self._path, '**', '*.txt'), recursive=True):\n item_id = osp.splitext(osp.relpath(path, self._path))[0]\n if osp.basename(item_id).startswith('gt_'):\n item_id = osp.join(osp.dirname(item_id), osp.basename(item_id)[3:])\n item_id = item_id.replace('\\\\', '/')\n\n if item_id not in items:\n items[item_id] = DatasetItem(item_id, subset=self._subset,\n image=images.get(item_id))\n annotations = items[item_id].annotations\n\n with open(path, encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n objects = line.split('\\\"')\n if 1 < len(objects):\n if len(objects) == 3:\n text = objects[1]\n else:\n raise Exception(\"Line %s: unexpected number \"\n \"of quotes in filename\" % line)\n else:\n text = ''\n objects = objects[0].split()\n if len(objects) == 1:\n objects = objects[0].split(',')\n\n if 8 <= len(objects):\n points = [float(p) for p in objects[:8]]\n\n attributes = {}\n if 0 < len(text):\n attributes['text'] = text\n elif len(objects) == 9:\n text = objects[8]\n attributes['text'] = text\n\n annotations.append(\n Polygon(points, attributes=attributes))\n elif 4 <= len(objects):\n x = float(objects[0])\n y = float(objects[1])\n w = float(objects[2]) - x\n h = float(objects[3]) - y\n\n attributes = {}\n if 0 < len(text):\n attributes['text'] = text\n elif len(objects) == 5:\n text = objects[4]\n attributes['text'] = text\n\n annotations.append(\n Bbox(x, y, w, h, attributes=attributes))\n return items\n\n def _load_segmentation_items(self):\n items = {}\n\n image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)\n if osp.isdir(image_dir):\n images = { osp.splitext(osp.relpath(p, image_dir))[0]: p\n for p in find_images(image_dir, recursive=True) }\n else:\n images = {}\n\n for path in iglob(osp.join(self._path, '**', '*.txt'), recursive=True):\n item_id = osp.splitext(osp.relpath(path, self._path))[0]\n item_id = item_id.replace('\\\\', '/')\n if item_id.endswith('_GT'):\n item_id = item_id[:-3]\n\n if item_id not in items:\n items[item_id] = DatasetItem(item_id, subset=self._subset,\n image=images.get(item_id))\n annotations = items[item_id].annotations\n\n colors = [(255, 255, 255)]\n chars = ['']\n centers = [0]\n groups = [0]\n group = 1\n number_in_group = 0\n with open(path, encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n if line == '':\n if number_in_group == 1:\n groups[len(groups) - 1] = 0\n else:\n group += 1\n number_in_group = 0\n continue\n\n objects = line.split()\n if objects[0][0] == '#':\n objects[0] = objects[0][1:]\n objects[9] = '\\\" \\\"'\n objects.pop()\n if len(objects) != 10:\n raise Exception(\"Line %s contains the wrong number \"\n \"of arguments, e.g. 
'241 73 144 1 4 0 3 1 4 \\\"h\\\"\" % line)\n\n centers.append(objects[3] + ' ' + objects[4])\n groups.append(group)\n colors.append(tuple(int(o) for o in objects[:3]))\n char = objects[9]\n if char[0] == '\\\"' and char[-1] == '\\\"':\n char = char[1:-1]\n chars.append(char)\n number_in_group += 1\n if number_in_group == 1:\n groups[len(groups) - 1] = 0\n\n mask_categories = MaskCategories(\n {i: colors[i] for i in range(len(colors))})\n inverse_cls_colormap = mask_categories.inverse_colormap\n\n gt_path = osp.join(self._path, item_id + '_GT' + IcdarPath.GT_EXT)\n if osp.isfile(gt_path):\n # load mask through cache\n mask = lazy_mask(gt_path, inverse_cls_colormap)\n mask = mask()\n\n classes = np.unique(mask)\n for label_id in classes:\n if label_id == 0:\n continue\n i = int(label_id)\n annotations.append(Mask(group=groups[i],\n image=self._lazy_extract_mask(mask, label_id),\n attributes={ 'index': i - 1,\n 'color': ' '.join(str(p) for p in colors[i]),\n 'text': chars[i], 'center': centers[i] }\n ))\n return items\n\n @staticmethod\n def _lazy_extract_mask(mask, c):\n return lambda: mask == c\n\nclass IcdarWordRecognitionExtractor(_IcdarExtractor):\n def __init__(self, path, **kwargs):\n kwargs['task'] = IcdarTask.word_recognition\n super().__init__(path, **kwargs)\n\nclass IcdarTextLocalizationExtractor(_IcdarExtractor):\n def __init__(self, path, **kwargs):\n kwargs['task'] = IcdarTask.text_localization\n super().__init__(path, **kwargs)\n\nclass IcdarTextSegmentationExtractor(_IcdarExtractor):\n def __init__(self, path, **kwargs):\n kwargs['task'] = IcdarTask.text_segmentation\n super().__init__(path, **kwargs)\n\n\nclass IcdarWordRecognitionImporter(Importer):\n @classmethod\n def find_sources(cls, path):\n return cls._find_sources_recursive(path, '.txt', 'icdar_word_recognition')\n\nclass IcdarTextLocalizationImporter(Importer):\n @classmethod\n def find_sources(cls, path):\n return cls._find_sources_recursive(path, '', 'icdar_text_localization')\n\nclass IcdarTextSegmentationImporter(Importer):\n @classmethod\n def find_sources(cls, path):\n return cls._find_sources_recursive(path, '', 'icdar_text_segmentation')\n", "path": "datumaro/plugins/icdar_format/extractor.py"}], "after_files": [{"content": "# Copyright (C) 2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom glob import iglob\nimport logging as log\nimport os.path as osp\n\nimport numpy as np\n\nfrom datumaro.components.extractor import (\n Bbox, Caption, DatasetItem, Importer, Mask, MaskCategories, Polygon,\n SourceExtractor,\n)\nfrom datumaro.util.image import find_images\nfrom datumaro.util.mask_tools import lazy_mask\n\nfrom .format import IcdarPath, IcdarTask\n\n\nclass _IcdarExtractor(SourceExtractor):\n def __init__(self, path, task, subset=None):\n self._path = path\n self._task = task\n\n if task is IcdarTask.word_recognition:\n if not osp.isfile(path):\n raise FileNotFoundError(\n \"Can't read annotation file '%s'\" % path)\n\n if not subset:\n subset = osp.basename(osp.dirname(path))\n super().__init__(subset=subset)\n\n self._dataset_dir = osp.dirname(osp.dirname(path))\n\n self._items = list(self._load_recognition_items().values())\n elif task in {IcdarTask.text_localization, IcdarTask.text_segmentation}:\n if not osp.isdir(path):\n raise NotADirectoryError(\n \"Can't open folder with annotation files '%s'\" % path)\n\n if not subset:\n subset = osp.basename(path)\n super().__init__(subset=subset)\n\n self._dataset_dir = osp.dirname(path)\n\n if task is IcdarTask.text_localization:\n self._items = 
list(self._load_localization_items().values())\n else:\n self._items = list(self._load_segmentation_items().values())\n\n def _load_recognition_items(self):\n items = {}\n\n with open(self._path, encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n objects = line.split(', ')\n if len(objects) == 2:\n image = objects[0]\n captions = []\n for caption in objects[1:]:\n if caption[0] != '\\\"' or caption[-1] != '\\\"':\n log.warning(\"Line %s: unexpected number \"\n \"of quotes\" % line)\n else:\n captions.append(caption.replace('\\\\', '')[1:-1])\n else:\n image = objects[0][:-1]\n captions = []\n\n item_id = osp.splitext(image)[0]\n image_path = osp.join(osp.dirname(self._path),\n IcdarPath.IMAGES_DIR, image)\n if item_id not in items:\n items[item_id] = DatasetItem(item_id, subset=self._subset,\n image=image_path)\n\n annotations = items[item_id].annotations\n for caption in captions:\n annotations.append(Caption(caption))\n\n return items\n\n def _load_localization_items(self):\n items = {}\n\n image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)\n if osp.isdir(image_dir):\n images = { osp.splitext(osp.relpath(p, image_dir))[0]: p\n for p in find_images(image_dir, recursive=True) }\n else:\n images = {}\n\n for path in iglob(osp.join(self._path, '**', '*.txt'), recursive=True):\n item_id = osp.splitext(osp.relpath(path, self._path))[0]\n if osp.basename(item_id).startswith('gt_'):\n item_id = osp.join(osp.dirname(item_id), osp.basename(item_id)[3:])\n item_id = item_id.replace('\\\\', '/')\n\n if item_id not in items:\n items[item_id] = DatasetItem(item_id, subset=self._subset,\n image=images.get(item_id))\n annotations = items[item_id].annotations\n\n with open(path, encoding='utf-8') as f:\n for line in f:\n line = line.strip()\n objects = line.split('\\\"')\n if 1 < len(objects):\n if len(objects) == 3:\n text = objects[1]\n else:\n raise Exception(\"Line %s: unexpected number \"\n \"of quotes in filename\" % line)\n else:\n text = ''\n objects = objects[0].split()\n if len(objects) == 1:\n objects = objects[0].split(',')\n\n if 8 <= len(objects):\n points = [float(p) for p in objects[:8]]\n\n attributes = {}\n if 0 < len(text):\n attributes['text'] = text\n elif len(objects) == 9:\n text = objects[8]\n attributes['text'] = text\n\n annotations.append(\n Polygon(points, attributes=attributes))\n elif 4 <= len(objects):\n x = float(objects[0])\n y = float(objects[1])\n w = float(objects[2]) - x\n h = float(objects[3]) - y\n\n attributes = {}\n if 0 < len(text):\n attributes['text'] = text\n elif len(objects) == 5:\n text = objects[4]\n attributes['text'] = text\n\n annotations.append(\n Bbox(x, y, w, h, attributes=attributes))\n return items\n\n def _load_segmentation_items(self):\n items = {}\n\n image_dir = osp.join(self._path, IcdarPath.IMAGES_DIR)\n if osp.isdir(image_dir):\n images = { osp.splitext(osp.relpath(p, image_dir))[0]: p\n for p in find_images(image_dir, recursive=True) }\n else:\n images = {}\n\n for path in iglob(osp.join(self._path, '**', '*.txt'), recursive=True):\n item_id = osp.splitext(osp.relpath(path, self._path))[0]\n item_id = item_id.replace('\\\\', '/')\n if item_id.endswith('_GT'):\n item_id = item_id[:-3]\n\n if item_id not in items:\n items[item_id] = DatasetItem(item_id, subset=self._subset,\n image=images.get(item_id))\n annotations = items[item_id].annotations\n\n colors = [(255, 255, 255)]\n chars = ['']\n centers = [0]\n groups = [0]\n group = 1\n number_in_group = 0\n with open(path, encoding='utf-8') as f:\n for line in f:\n line = 
line.strip()\n if line == '':\n if number_in_group == 1:\n groups[len(groups) - 1] = 0\n else:\n group += 1\n number_in_group = 0\n continue\n\n objects = line.split()\n if objects[0][0] == '#':\n objects[0] = objects[0][1:]\n objects[9] = '\\\" \\\"'\n objects.pop()\n if len(objects) != 10:\n raise Exception(\"Line %s contains the wrong number \"\n \"of arguments, e.g. '241 73 144 1 4 0 3 1 4 \\\"h\\\"\" % line)\n\n centers.append(objects[3] + ' ' + objects[4])\n groups.append(group)\n colors.append(tuple(int(o) for o in objects[:3]))\n char = objects[9]\n if char[0] == '\\\"' and char[-1] == '\\\"':\n char = char[1:-1]\n chars.append(char)\n number_in_group += 1\n if number_in_group == 1:\n groups[len(groups) - 1] = 0\n\n mask_categories = MaskCategories(\n {i: colors[i] for i in range(len(colors))})\n inverse_cls_colormap = mask_categories.inverse_colormap\n\n gt_path = osp.join(self._path, item_id + '_GT' + IcdarPath.GT_EXT)\n if osp.isfile(gt_path):\n # load mask through cache\n mask = lazy_mask(gt_path, inverse_cls_colormap)\n mask = mask()\n\n classes = np.unique(mask)\n for label_id in classes:\n if label_id == 0:\n continue\n i = int(label_id)\n annotations.append(Mask(group=groups[i],\n image=self._lazy_extract_mask(mask, label_id),\n attributes={ 'index': i - 1,\n 'color': ' '.join(str(p) for p in colors[i]),\n 'text': chars[i], 'center': centers[i] }\n ))\n return items\n\n @staticmethod\n def _lazy_extract_mask(mask, c):\n return lambda: mask == c\n\nclass IcdarWordRecognitionExtractor(_IcdarExtractor):\n def __init__(self, path, **kwargs):\n kwargs['task'] = IcdarTask.word_recognition\n super().__init__(path, **kwargs)\n\nclass IcdarTextLocalizationExtractor(_IcdarExtractor):\n def __init__(self, path, **kwargs):\n kwargs['task'] = IcdarTask.text_localization\n super().__init__(path, **kwargs)\n\nclass IcdarTextSegmentationExtractor(_IcdarExtractor):\n def __init__(self, path, **kwargs):\n kwargs['task'] = IcdarTask.text_segmentation\n super().__init__(path, **kwargs)\n\n\nclass IcdarWordRecognitionImporter(Importer):\n @classmethod\n def find_sources(cls, path):\n return cls._find_sources_recursive(path, '.txt', 'icdar_word_recognition')\n\nclass IcdarTextLocalizationImporter(Importer):\n @classmethod\n def find_sources(cls, path):\n return cls._find_sources_recursive(path, '', 'icdar_text_localization')\n\nclass IcdarTextSegmentationImporter(Importer):\n @classmethod\n def find_sources(cls, path):\n return cls._find_sources_recursive(path, '', 'icdar_text_segmentation')\n", "path": "datumaro/plugins/icdar_format/extractor.py"}]} | 3,205 | 317 |
gh_patches_debug_23513 | rasdani/github-patches | git_diff | pyca__cryptography-2385 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 3.6 incompatibility because of getargspec
Hi, while testing pywikibot on 3.6 I noticed that your package uses `inspect.getargspec`, which has been removed in 3.6 after being deprecated in 3.5 (although the documentation says it has been deprecated since Python 3).
I know that Python 3.6 hasn't been released yet but I just wanted to let you know so you have enough time to prevent issues before it is released.
Here is the Travis build: https://travis-ci.org/xZise/pywikibot-core/jobs/82446934
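
For reference, the usual compatibility shim looks something like the hedged sketch below; the `describe_args` helper and the example function are illustrative, not part of cryptography:

```
import inspect

# Prefer inspect.signature() (available since Python 3.3 and still present in
# 3.6) and fall back to getargspec() only on interpreters that still offer it.
if hasattr(inspect, "signature"):
    _get_signature = inspect.signature
else:  # legacy interpreters without inspect.signature
    _get_signature = inspect.getargspec

def describe_args(func):
    """Return a comparable description of func's arguments."""
    return _get_signature(func)

def example(a, b=1, *args, **kwargs):
    pass

print(describe_args(example))  # prints "(a, b=1, *args, **kwargs)" on Python 3
```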
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/utils.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import abc
8 import binascii
9 import inspect
10 import struct
11 import sys
12 import warnings
13
14
15 DeprecatedIn09 = DeprecationWarning
16 DeprecatedIn10 = PendingDeprecationWarning
17
18
19 def read_only_property(name):
20 return property(lambda self: getattr(self, name))
21
22
23 def register_interface(iface):
24 def register_decorator(klass):
25 verify_interface(iface, klass)
26 iface.register(klass)
27 return klass
28 return register_decorator
29
30
31 if hasattr(int, "from_bytes"):
32 int_from_bytes = int.from_bytes
33 else:
34 def int_from_bytes(data, byteorder, signed=False):
35 assert byteorder == 'big'
36 assert not signed
37
38 if len(data) % 4 != 0:
39 data = (b'\x00' * (4 - (len(data) % 4))) + data
40
41 result = 0
42
43 while len(data) > 0:
44 digit, = struct.unpack('>I', data[:4])
45 result = (result << 32) + digit
46 data = data[4:]
47
48 return result
49
50
51 def int_to_bytes(integer):
52 hex_string = '%x' % integer
53 n = len(hex_string)
54 return binascii.unhexlify(hex_string.zfill(n + (n & 1)))
55
56
57 class InterfaceNotImplemented(Exception):
58 pass
59
60
61 def verify_interface(iface, klass):
62 for method in iface.__abstractmethods__:
63 if not hasattr(klass, method):
64 raise InterfaceNotImplemented(
65 "{0} is missing a {1!r} method".format(klass, method)
66 )
67 if isinstance(getattr(iface, method), abc.abstractproperty):
68 # Can't properly verify these yet.
69 continue
70 spec = inspect.getargspec(getattr(iface, method))
71 actual = inspect.getargspec(getattr(klass, method))
72 if spec != actual:
73 raise InterfaceNotImplemented(
74 "{0}.{1}'s signature differs from the expected. Expected: "
75 "{2!r}. Received: {3!r}".format(
76 klass, method, spec, actual
77 )
78 )
79
80
81 if sys.version_info >= (2, 7):
82 def bit_length(x):
83 return x.bit_length()
84 else:
85 def bit_length(x):
86 return len(bin(x)) - (2 + (x <= 0))
87
88
89 class _DeprecatedValue(object):
90 def __init__(self, value, message, warning_class):
91 self.value = value
92 self.message = message
93 self.warning_class = warning_class
94
95
96 class _ModuleWithDeprecations(object):
97 def __init__(self, module):
98 self.__dict__["_module"] = module
99
100 def __getattr__(self, attr):
101 obj = getattr(self._module, attr)
102 if isinstance(obj, _DeprecatedValue):
103 warnings.warn(obj.message, obj.warning_class, stacklevel=2)
104 obj = obj.value
105 return obj
106
107 def __setattr__(self, attr, value):
108 setattr(self._module, attr, value)
109
110 def __dir__(self):
111 return ["_module"] + dir(self._module)
112
113
114 def deprecated(value, module_name, message, warning_class):
115 module = sys.modules[module_name]
116 if not isinstance(module, _ModuleWithDeprecations):
117 sys.modules[module_name] = module = _ModuleWithDeprecations(module)
118 return _DeprecatedValue(value, message, warning_class)
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py
--- a/src/cryptography/utils.py
+++ b/src/cryptography/utils.py
@@ -58,6 +58,12 @@
pass
+if hasattr(inspect, "signature"):
+ signature = inspect.signature
+else:
+ signature = inspect.getargspec
+
+
def verify_interface(iface, klass):
for method in iface.__abstractmethods__:
if not hasattr(klass, method):
@@ -67,13 +73,13 @@
if isinstance(getattr(iface, method), abc.abstractproperty):
# Can't properly verify these yet.
continue
- spec = inspect.getargspec(getattr(iface, method))
- actual = inspect.getargspec(getattr(klass, method))
- if spec != actual:
+ sig = signature(getattr(iface, method))
+ actual = signature(getattr(klass, method))
+ if sig != actual:
raise InterfaceNotImplemented(
"{0}.{1}'s signature differs from the expected. Expected: "
"{2!r}. Received: {3!r}".format(
- klass, method, spec, actual
+ klass, method, sig, actual
)
)
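
For context on the diff above: it replaces the strict `getargspec` comparison with a small compatibility shim that prefers `inspect.signature` where it exists and falls back to `inspect.getargspec` otherwise. Below is a minimal, self-contained sketch of that shim pattern; the `describe` and `example` helpers are illustrative only and are not part of cryptography's code.

```python
import inspect

# Prefer the modern API (available since Python 3.3); fall back to the
# legacy one on interpreters that still only provide getargspec.
if hasattr(inspect, "signature"):
    signature = inspect.signature
else:
    signature = inspect.getargspec


def describe(func):
    # Either branch yields an object whose repr is usable in error messages.
    return repr(signature(func))


def example(a, b, c=1):
    return a + b + c


print(describe(example))  # on Python 3: "<Signature (a, b, c=1)>"
```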
| {"golden_diff": "diff --git a/src/cryptography/utils.py b/src/cryptography/utils.py\n--- a/src/cryptography/utils.py\n+++ b/src/cryptography/utils.py\n@@ -58,6 +58,12 @@\n pass\n \n \n+if hasattr(inspect, \"signature\"):\n+ signature = inspect.signature\n+else:\n+ signature = inspect.getargspec\n+\n+\n def verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n@@ -67,13 +73,13 @@\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n- spec = inspect.getargspec(getattr(iface, method))\n- actual = inspect.getargspec(getattr(klass, method))\n- if spec != actual:\n+ sig = signature(getattr(iface, method))\n+ actual = signature(getattr(klass, method))\n+ if sig != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. Received: {3!r}\".format(\n- klass, method, spec, actual\n+ klass, method, sig, actual\n )\n )\n", "issue": "Python 3.6 incompatibility because of getargspec\nHi, on testing pywikibot on 3.6 I noticed that your package uses `inspect.getargspec` which has been removed in 3.6 after it has been deprecated in 3.5 (although the documentation mentions it has been deprecated since Python 3).\n\nI know that Python 3.6 hasn't been released yet but I just wanted to let you know so you have enough time to prevent issues before it is released.\n\nHere the Travis build: https://travis-ci.org/xZise/pywikibot-core/jobs/82446934\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport binascii\nimport inspect\nimport struct\nimport sys\nimport warnings\n\n\nDeprecatedIn09 = DeprecationWarning\nDeprecatedIn10 = PendingDeprecationWarning\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\nif hasattr(int, \"from_bytes\"):\n int_from_bytes = int.from_bytes\nelse:\n def int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n if len(data) % 4 != 0:\n data = (b'\\x00' * (4 - (len(data) % 4))) + data\n\n result = 0\n\n while len(data) > 0:\n digit, = struct.unpack('>I', data[:4])\n result = (result << 32) + digit\n data = data[4:]\n\n return result\n\n\ndef int_to_bytes(integer):\n hex_string = '%x' % integer\n n = len(hex_string)\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n spec = inspect.getargspec(getattr(iface, method))\n actual = inspect.getargspec(getattr(klass, method))\n if spec != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. 
Received: {3!r}\".format(\n klass, method, spec, actual\n )\n )\n\n\nif sys.version_info >= (2, 7):\n def bit_length(x):\n return x.bit_length()\nelse:\n def bit_length(x):\n return len(bin(x)) - (2 + (x <= 0))\n\n\nclass _DeprecatedValue(object):\n def __init__(self, value, message, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(object):\n def __init__(self, module):\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(value, module_name, message, warning_class):\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = module = _ModuleWithDeprecations(module)\n return _DeprecatedValue(value, message, warning_class)\n", "path": "src/cryptography/utils.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport abc\nimport binascii\nimport inspect\nimport struct\nimport sys\nimport warnings\n\n\nDeprecatedIn09 = DeprecationWarning\nDeprecatedIn10 = PendingDeprecationWarning\n\n\ndef read_only_property(name):\n return property(lambda self: getattr(self, name))\n\n\ndef register_interface(iface):\n def register_decorator(klass):\n verify_interface(iface, klass)\n iface.register(klass)\n return klass\n return register_decorator\n\n\nif hasattr(int, \"from_bytes\"):\n int_from_bytes = int.from_bytes\nelse:\n def int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n if len(data) % 4 != 0:\n data = (b'\\x00' * (4 - (len(data) % 4))) + data\n\n result = 0\n\n while len(data) > 0:\n digit, = struct.unpack('>I', data[:4])\n result = (result << 32) + digit\n data = data[4:]\n\n return result\n\n\ndef int_to_bytes(integer):\n hex_string = '%x' % integer\n n = len(hex_string)\n return binascii.unhexlify(hex_string.zfill(n + (n & 1)))\n\n\nclass InterfaceNotImplemented(Exception):\n pass\n\n\nif hasattr(inspect, \"signature\"):\n signature = inspect.signature\nelse:\n signature = inspect.getargspec\n\n\ndef verify_interface(iface, klass):\n for method in iface.__abstractmethods__:\n if not hasattr(klass, method):\n raise InterfaceNotImplemented(\n \"{0} is missing a {1!r} method\".format(klass, method)\n )\n if isinstance(getattr(iface, method), abc.abstractproperty):\n # Can't properly verify these yet.\n continue\n sig = signature(getattr(iface, method))\n actual = signature(getattr(klass, method))\n if sig != actual:\n raise InterfaceNotImplemented(\n \"{0}.{1}'s signature differs from the expected. Expected: \"\n \"{2!r}. 
Received: {3!r}\".format(\n klass, method, sig, actual\n )\n )\n\n\nif sys.version_info >= (2, 7):\n def bit_length(x):\n return x.bit_length()\nelse:\n def bit_length(x):\n return len(bin(x)) - (2 + (x <= 0))\n\n\nclass _DeprecatedValue(object):\n def __init__(self, value, message, warning_class):\n self.value = value\n self.message = message\n self.warning_class = warning_class\n\n\nclass _ModuleWithDeprecations(object):\n def __init__(self, module):\n self.__dict__[\"_module\"] = module\n\n def __getattr__(self, attr):\n obj = getattr(self._module, attr)\n if isinstance(obj, _DeprecatedValue):\n warnings.warn(obj.message, obj.warning_class, stacklevel=2)\n obj = obj.value\n return obj\n\n def __setattr__(self, attr, value):\n setattr(self._module, attr, value)\n\n def __dir__(self):\n return [\"_module\"] + dir(self._module)\n\n\ndef deprecated(value, module_name, message, warning_class):\n module = sys.modules[module_name]\n if not isinstance(module, _ModuleWithDeprecations):\n sys.modules[module_name] = module = _ModuleWithDeprecations(module)\n return _DeprecatedValue(value, message, warning_class)\n", "path": "src/cryptography/utils.py"}]} | 1,443 | 272 |
gh_patches_debug_2763 | rasdani/github-patches | git_diff | google__timesketch-949 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Yeti analyzer crash
From the celery log:
```
[2019-07-17 09:11:37,463: ERROR/ForkPoolWorker-1] Task timesketch.lib.tasks.run_sketch_analyzer[46d24990-12df-4c88-a79b-a3b5f1c04b01] raised unexpected: TypeError("unsupported operand type(s) for +: 'NoneType' and 'unicode'",)
Traceback (most recent call last):
File "/opt/timesketch/env/local/lib/python2.7/site-packages/celery/app/trace.py", line 374, in trace_task
R = retval = fun(*args, **kwargs)
File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/__init__.py", line 181, in __call__
return TaskBase.__call__(self, *args, **kwargs)
File "/opt/timesketch/env/local/lib/python2.7/site-packages/celery/app/trace.py", line 629, in __protected_call__
return self.run(*args, **kwargs)
File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/tasks.py", line 339, in run_sketch_analyzer
result = analyzer.run_wrapper()
File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/interface.py", line 37, in wrapper
func_return = func(self, *args, **kwargs)
File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/interface.py", line 403, in run_wrapper
result = self.run()
File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/yetiindicators.py", line 92, in run
self.get_intrusion_sets()
File "/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/yetiindicators.py", line 75, in get_intrusion_sets
self.yeti_api_root + '/entities/filter/',
TypeError: unsupported operand type(s) for +: 'NoneType' and 'unicode'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `timesketch/lib/analyzers/yetiindicators.py`
Content:
```
1 """Index analyzer plugin for Yeti indicators."""
2 from __future__ import unicode_literals
3
4 from flask import current_app
5 import requests
6
7 from timesketch.lib.analyzers import interface
8 from timesketch.lib.analyzers import manager
9 from timesketch.lib import emojis
10
11
12 def build_query_for_indicators(indicators):
13 """Builds an Elasticsearch query for Yeti indicator patterns.
14
15 Prepends and appends .* to the regex to be able to search within a field.
16
17 Returns:
18 The resulting ES query string.
19 """
20 query = []
21 for domain in indicators:
22 query.append('domain:/.*{0:s}.*/'.format(domain['pattern']))
23 return ' OR '.join(query)
24
25
26 class YetiIndicators(interface.BaseSketchAnalyzer):
27 """Index analyzer for Yeti threat intel indicators."""
28
29 NAME = 'yetiindicators'
30 DEPENDENCIES = frozenset(['domain'])
31
32 def __init__(self, index_name, sketch_id):
33 """Initialize the Index Analyzer.
34
35 Args:
36 index_name: Elasticsearch index name
37 """
38 super(YetiIndicators, self).__init__(index_name, sketch_id)
39 self.intel = {}
40 self.yeti_api_root = current_app.config.get('YETI_API_ROOT')
41 self.yeti_api_key = current_app.config.get('YETI_API_KEY')
42 self.yeti_indicator_labels = current_app.config.get(
43 'YETI_INDICATOR_LABELS', [])
44
45 def get_bad_domain_indicators(self, entity_id):
46 """Retrieves a list of indicators associated to a given entity.
47
48 Args:
49 entity_id (str): STIX ID of the entity to get associated inticators
50 from. (typically an Intrusion Set)
51
52 Returns:
53 A list of JSON objects describing a Yeti Indicator.
54 """
55 results = requests.post(
56 self.yeti_api_root + '/entities/{0:s}/neighbors/'.format(entity_id),
57 headers={'X-Yeti-API': self.yeti_api_key},
58 )
59 if results.status_code != 200:
60 return []
61 domain_indicators = []
62 for neighbor in results.json().get('vertices', {}).values():
63 if neighbor['type'] == 'x-regex' and \
64 set(self.yeti_indicator_labels) <= set(neighbor['labels']):
65 domain_indicators.append(neighbor)
66
67 return domain_indicators
68
69 def get_intrusion_sets(self):
70 """Populates the intel attribute with data from Yeti.
71
72 Retrieved intel consists of Intrusion sets and associated Indicators.
73 """
74 results = requests.post(
75 self.yeti_api_root + '/entities/filter/',
76 json={'name': '', 'type': 'intrusion-set'},
77 headers={'X-Yeti-API': self.yeti_api_key},
78 )
79 if results.status_code != 200:
80 return
81 self.intel = {item['id']: item for item in results.json()}
82 for _id in self.intel:
83 self.intel[_id]['indicators'] = self.get_bad_domain_indicators(_id)
84
85 def run(self):
86 """Entry point for the analyzer.
87
88 Returns:
89 String with summary of the analyzer result
90 """
91
92 self.get_intrusion_sets()
93 actors_found = []
94 for intrusion_set in self.intel.values():
95 if not intrusion_set['indicators']:
96 continue
97
98 found = False
99
100 for indicator in intrusion_set['indicators']:
101 query = build_query_for_indicators([indicator])
102
103 events = self.event_stream(query_string=query,
104 return_fields=[])
105
106 name = intrusion_set['name']
107 for event in events:
108 found = True
109 event.add_emojis([emojis.get_emoji('SKULL')])
110 event.add_tags([name])
111 event.commit()
112 event.add_comment(
113 'Indicator "{0:s}" found for actor "{1:s}"'.format(
114 indicator['name'], name))
115
116 if found:
117 actors_found.append(name)
118 self.sketch.add_view(
119 'Domain activity for actor {0:s}'.format(name),
120 self.NAME,
121 query_string=query)
122
123 if actors_found:
124 return '{0:d} actors were found! [{1:s}]'.format(
125 len(actors_found), ', '.join(actors_found))
126 return 'No indicators were found in the timeline.'
127
128
129 manager.AnalysisManager.register_analyzer(YetiIndicators)
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/timesketch/lib/analyzers/yetiindicators.py b/timesketch/lib/analyzers/yetiindicators.py
--- a/timesketch/lib/analyzers/yetiindicators.py
+++ b/timesketch/lib/analyzers/yetiindicators.py
@@ -88,6 +88,8 @@
Returns:
String with summary of the analyzer result
"""
+ if not self.yeti_api_root or not self.yeti_api_key:
+ return 'No Yeti configuration settings found, aborting.'
self.get_intrusion_sets()
actors_found = []
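
For context on the diff above: the crash in the issue came from concatenating a missing (None) `YETI_API_ROOT` with a string, and the fix is an early-return guard on the configuration values. The sketch below shows the same guard pattern in isolation; `run_analyzer` and its literals are illustrative stand-ins, not Timesketch API.

```python
def run_analyzer(config):
    """Return a summary string instead of crashing when settings are absent."""
    api_root = config.get("YETI_API_ROOT")
    api_key = config.get("YETI_API_KEY")
    if not api_root or not api_key:
        # Bail out before api_root is ever concatenated with a path,
        # which is what raised the TypeError in the reported traceback.
        return "No Yeti configuration settings found, aborting."
    return "would POST to " + api_root + "/entities/filter/"


print(run_analyzer({}))  # aborts cleanly
print(run_analyzer({"YETI_API_ROOT": "http://yeti.local/api",
                    "YETI_API_KEY": "secret"}))  # builds the URL
```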
| {"golden_diff": "diff --git a/timesketch/lib/analyzers/yetiindicators.py b/timesketch/lib/analyzers/yetiindicators.py\n--- a/timesketch/lib/analyzers/yetiindicators.py\n+++ b/timesketch/lib/analyzers/yetiindicators.py\n@@ -88,6 +88,8 @@\n Returns:\n String with summary of the analyzer result\n \"\"\"\n+ if not self.yeti_api_root or not self.yeti_api_key:\n+ return 'No Yeti configuration settings found, aborting.'\n \n self.get_intrusion_sets()\n actors_found = []\n", "issue": "Yeti analyzer crash\nFrom the celery log:\r\n\r\n```\r\n[2019-07-17 09:11:37,463: ERROR/ForkPoolWorker-1] Task timesketch.lib.tasks.run_sketch_analyzer[46d24990-12df-4c88-a79b-a3b5f1c04b01] raised unexpected: TypeError(\"unsupported operand type(s) for +: 'NoneType' and 'unicode'\",)\r\nTraceback (most recent call last):\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/celery/app/trace.py\", line 374, in trace_task\r\n R = retval = fun(*args, **kwargs)\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/__init__.py\", line 181, in __call__\r\n return TaskBase.__call__(self, *args, **kwargs)\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/celery/app/trace.py\", line 629, in __protected_call__\r\n return self.run(*args, **kwargs)\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/tasks.py\", line 339, in run_sketch_analyzer\r\n result = analyzer.run_wrapper()\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/interface.py\", line 37, in wrapper\r\n func_return = func(self, *args, **kwargs)\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/interface.py\", line 403, in run_wrapper\r\n result = self.run()\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/yetiindicators.py\", line 92, in run\r\n self.get_intrusion_sets()\r\n File \"/opt/timesketch/env/local/lib/python2.7/site-packages/timesketch/lib/analyzers/yetiindicators.py\", line 75, in get_intrusion_sets\r\n self.yeti_api_root + '/entities/filter/',\r\nTypeError: unsupported operand type(s) for +: 'NoneType' and 'unicode'\r\n```\n", "before_files": [{"content": "\"\"\"Index analyzer plugin for Yeti indicators.\"\"\"\nfrom __future__ import unicode_literals\n\nfrom flask import current_app\nimport requests\n\nfrom timesketch.lib.analyzers import interface\nfrom timesketch.lib.analyzers import manager\nfrom timesketch.lib import emojis\n\n\ndef build_query_for_indicators(indicators):\n \"\"\"Builds an Elasticsearch query for Yeti indicator patterns.\n\n Prepends and appends .* to the regex to be able to search within a field.\n\n Returns:\n The resulting ES query string.\n \"\"\"\n query = []\n for domain in indicators:\n query.append('domain:/.*{0:s}.*/'.format(domain['pattern']))\n return ' OR '.join(query)\n\n\nclass YetiIndicators(interface.BaseSketchAnalyzer):\n \"\"\"Index analyzer for Yeti threat intel indicators.\"\"\"\n\n NAME = 'yetiindicators'\n DEPENDENCIES = frozenset(['domain'])\n\n def __init__(self, index_name, sketch_id):\n \"\"\"Initialize the Index Analyzer.\n\n Args:\n index_name: Elasticsearch index name\n \"\"\"\n super(YetiIndicators, self).__init__(index_name, sketch_id)\n self.intel = {}\n self.yeti_api_root = current_app.config.get('YETI_API_ROOT')\n self.yeti_api_key = current_app.config.get('YETI_API_KEY')\n self.yeti_indicator_labels = current_app.config.get(\n 'YETI_INDICATOR_LABELS', [])\n\n def get_bad_domain_indicators(self, 
entity_id):\n \"\"\"Retrieves a list of indicators associated to a given entity.\n\n Args:\n entity_id (str): STIX ID of the entity to get associated inticators\n from. (typically an Intrusion Set)\n\n Returns:\n A list of JSON objects describing a Yeti Indicator.\n \"\"\"\n results = requests.post(\n self.yeti_api_root + '/entities/{0:s}/neighbors/'.format(entity_id),\n headers={'X-Yeti-API': self.yeti_api_key},\n )\n if results.status_code != 200:\n return []\n domain_indicators = []\n for neighbor in results.json().get('vertices', {}).values():\n if neighbor['type'] == 'x-regex' and \\\n set(self.yeti_indicator_labels) <= set(neighbor['labels']):\n domain_indicators.append(neighbor)\n\n return domain_indicators\n\n def get_intrusion_sets(self):\n \"\"\"Populates the intel attribute with data from Yeti.\n\n Retrieved intel consists of Intrusion sets and associated Indicators.\n \"\"\"\n results = requests.post(\n self.yeti_api_root + '/entities/filter/',\n json={'name': '', 'type': 'intrusion-set'},\n headers={'X-Yeti-API': self.yeti_api_key},\n )\n if results.status_code != 200:\n return\n self.intel = {item['id']: item for item in results.json()}\n for _id in self.intel:\n self.intel[_id]['indicators'] = self.get_bad_domain_indicators(_id)\n\n def run(self):\n \"\"\"Entry point for the analyzer.\n\n Returns:\n String with summary of the analyzer result\n \"\"\"\n\n self.get_intrusion_sets()\n actors_found = []\n for intrusion_set in self.intel.values():\n if not intrusion_set['indicators']:\n continue\n\n found = False\n\n for indicator in intrusion_set['indicators']:\n query = build_query_for_indicators([indicator])\n\n events = self.event_stream(query_string=query,\n return_fields=[])\n\n name = intrusion_set['name']\n for event in events:\n found = True\n event.add_emojis([emojis.get_emoji('SKULL')])\n event.add_tags([name])\n event.commit()\n event.add_comment(\n 'Indicator \"{0:s}\" found for actor \"{1:s}\"'.format(\n indicator['name'], name))\n\n if found:\n actors_found.append(name)\n self.sketch.add_view(\n 'Domain activity for actor {0:s}'.format(name),\n self.NAME,\n query_string=query)\n\n if actors_found:\n return '{0:d} actors were found! 
[{1:s}]'.format(\n len(actors_found), ', '.join(actors_found))\n return 'No indicators were found in the timeline.'\n\n\nmanager.AnalysisManager.register_analyzer(YetiIndicators)\n", "path": "timesketch/lib/analyzers/yetiindicators.py"}], "after_files": [{"content": "\"\"\"Index analyzer plugin for Yeti indicators.\"\"\"\nfrom __future__ import unicode_literals\n\nfrom flask import current_app\nimport requests\n\nfrom timesketch.lib.analyzers import interface\nfrom timesketch.lib.analyzers import manager\nfrom timesketch.lib import emojis\n\n\ndef build_query_for_indicators(indicators):\n \"\"\"Builds an Elasticsearch query for Yeti indicator patterns.\n\n Prepends and appends .* to the regex to be able to search within a field.\n\n Returns:\n The resulting ES query string.\n \"\"\"\n query = []\n for domain in indicators:\n query.append('domain:/.*{0:s}.*/'.format(domain['pattern']))\n return ' OR '.join(query)\n\n\nclass YetiIndicators(interface.BaseSketchAnalyzer):\n \"\"\"Index analyzer for Yeti threat intel indicators.\"\"\"\n\n NAME = 'yetiindicators'\n DEPENDENCIES = frozenset(['domain'])\n\n def __init__(self, index_name, sketch_id):\n \"\"\"Initialize the Index Analyzer.\n\n Args:\n index_name: Elasticsearch index name\n \"\"\"\n super(YetiIndicators, self).__init__(index_name, sketch_id)\n self.intel = {}\n self.yeti_api_root = current_app.config.get('YETI_API_ROOT')\n self.yeti_api_key = current_app.config.get('YETI_API_KEY')\n self.yeti_indicator_labels = current_app.config.get(\n 'YETI_INDICATOR_LABELS', [])\n\n def get_bad_domain_indicators(self, entity_id):\n \"\"\"Retrieves a list of indicators associated to a given entity.\n\n Args:\n entity_id (str): STIX ID of the entity to get associated inticators\n from. (typically an Intrusion Set)\n\n Returns:\n A list of JSON objects describing a Yeti Indicator.\n \"\"\"\n results = requests.post(\n self.yeti_api_root + '/entities/{0:s}/neighbors/'.format(entity_id),\n headers={'X-Yeti-API': self.yeti_api_key},\n )\n if results.status_code != 200:\n return []\n domain_indicators = []\n for neighbor in results.json().get('vertices', {}).values():\n if neighbor['type'] == 'x-regex' and \\\n set(self.yeti_indicator_labels) <= set(neighbor['labels']):\n domain_indicators.append(neighbor)\n\n return domain_indicators\n\n def get_intrusion_sets(self):\n \"\"\"Populates the intel attribute with data from Yeti.\n\n Retrieved intel consists of Intrusion sets and associated Indicators.\n \"\"\"\n results = requests.post(\n self.yeti_api_root + '/entities/filter/',\n json={'name': '', 'type': 'intrusion-set'},\n headers={'X-Yeti-API': self.yeti_api_key},\n )\n if results.status_code != 200:\n return\n self.intel = {item['id']: item for item in results.json()}\n for _id in self.intel:\n self.intel[_id]['indicators'] = self.get_bad_domain_indicators(_id)\n\n def run(self):\n \"\"\"Entry point for the analyzer.\n\n Returns:\n String with summary of the analyzer result\n \"\"\"\n if not self.yeti_api_root or not self.yeti_api_key:\n return 'No Yeti configuration settings found, aborting.'\n\n self.get_intrusion_sets()\n actors_found = []\n for intrusion_set in self.intel.values():\n if not intrusion_set['indicators']:\n continue\n\n found = False\n\n for indicator in intrusion_set['indicators']:\n query = build_query_for_indicators([indicator])\n\n events = self.event_stream(query_string=query,\n return_fields=[])\n\n name = intrusion_set['name']\n for event in events:\n found = True\n event.add_emojis([emojis.get_emoji('SKULL')])\n 
event.add_tags([name])\n event.commit()\n event.add_comment(\n 'Indicator \"{0:s}\" found for actor \"{1:s}\"'.format(\n indicator['name'], name))\n\n if found:\n actors_found.append(name)\n self.sketch.add_view(\n 'Domain activity for actor {0:s}'.format(name),\n self.NAME,\n query_string=query)\n\n if actors_found:\n return '{0:d} actors were found! [{1:s}]'.format(\n len(actors_found), ', '.join(actors_found))\n return 'No indicators were found in the timeline.'\n\n\nmanager.AnalysisManager.register_analyzer(YetiIndicators)\n", "path": "timesketch/lib/analyzers/yetiindicators.py"}]} | 2,013 | 134 |
gh_patches_debug_492 | rasdani/github-patches | git_diff | pytorch__ignite-506 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug with ProgressBar with TerminateOnNan
If we attach `ProgressBar` and `TerminateOnNaN` handlers to a trainer and `TerminateOnNan` stops training on the first iteration, we get the following error:
```python
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-36-b4ac10e6ccc4> in <module>
----> 1 trainer.run(train_ab_loader, max_epochs=200)
/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in run(self, data, max_epochs)
357 except BaseException as e:
358 self._logger.error("Engine run is terminating due to exception: %s.", str(e))
--> 359 self._handle_exception(e)
360
361 return self.state
/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in _handle_exception(self, e)
322 self._fire_event(Events.EXCEPTION_RAISED, e)
323 else:
--> 324 raise e
325
326 def run(self, data, max_epochs=1):
/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in run(self, data, max_epochs)
350 self._fire_event(Events.EPOCH_COMPLETED)
351
--> 352 self._fire_event(Events.COMPLETED)
353 time_taken = time.time() - start_time
354 hours, mins, secs = _to_hours_mins_secs(time_taken)
/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in _fire_event(self, event_name, *event_args, **event_kwargs)
257 for func, args, kwargs in self._event_handlers[event_name]:
258 kwargs.update(event_kwargs)
--> 259 func(self, *(event_args + args), **kwargs)
260
261 def fire_event(self, event_name):
/opt/conda/lib/python3.7/site-packages/ignite/contrib/handlers/tqdm_logger.py in _close(self, engine)
115
116 def _close(self, engine):
--> 117 self.pbar.close()
118 self.pbar = None
119
AttributeError: 'NoneType' object has no attribute 'close'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/contrib/handlers/tqdm_logger.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import numbers
3 import warnings
4
5 import torch
6
7 from ignite.engine import Events
8
9 from ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler
10
11
12 class ProgressBar(BaseLogger):
13 """
14 TQDM progress bar handler to log training progress and computed metrics.
15
16 Args:
17 persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)
18 bar_format (str, optional): Specify a custom bar string formatting. May impact performance.
19 [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].
20 Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where
21 l_bar='{desc}: {percentage:3.0f}%|' and
22 r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the
23 formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.
24 **tqdm_kwargs: kwargs passed to tqdm progress bar.
25 By default, progress bar description displays "Epoch [5/10]" where 5 is the current epoch and 10 is the
26 number of epochs. If tqdm_kwargs defines `desc`, e.g. "Predictions", than the description is
27 "Predictions [5/10]" if number of epochs is more than one otherwise it is simply "Predictions".
28
29 Examples:
30
31 Simple progress bar
32
33 .. code-block:: python
34
35 trainer = create_supervised_trainer(model, optimizer, loss)
36
37 pbar = ProgressBar()
38 pbar.attach(trainer)
39
40 # Progress bar will looks like
41 # Epoch [2/50]: [64/128] 50%|█████ [06:17<12:34]
42
43 Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`
44 (such as :class:`~ignite.metrics.RunningAverage`)
45
46 .. code-block:: python
47
48 trainer = create_supervised_trainer(model, optimizer, loss)
49
50 RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')
51
52 pbar = ProgressBar()
53 pbar.attach(trainer, ['loss'])
54
55 # Progress bar will looks like
56 # Epoch [2/50]: [64/128] 50%|█████ , loss=12.34e-02 [06:17<12:34]
57
58 Directly attach the engine's output
59
60 .. code-block:: python
61
62 trainer = create_supervised_trainer(model, optimizer, loss)
63
64 pbar = ProgressBar()
65 pbar.attach(trainer, output_transform=lambda x: {'loss': x})
66
67 # Progress bar will looks like
68 # Epoch [2/50]: [64/128] 50%|█████ , loss=12.34e-02 [06:17<12:34]
69
70 Note:
71 When adding attaching the progress bar to an engine, it is recommend that you replace
72 every print operation in the engine's handlers triggered every iteration with
73 ``pbar.log_message`` to guarantee the correct format of the stdout.
74
75 Note:
76 When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,
77 please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.
78 Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set
79 to an empty string value.
80
81 """
82
83 events_order = [
84 Events.STARTED,
85 Events.EPOCH_STARTED,
86 Events.ITERATION_STARTED,
87 Events.ITERATION_COMPLETED,
88 Events.EPOCH_COMPLETED,
89 Events.COMPLETED
90 ]
91
92 def __init__(self, persist=False,
93 bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',
94 **tqdm_kwargs):
95
96 try:
97 from tqdm.autonotebook import tqdm
98 except ImportError:
99 raise RuntimeError("This contrib module requires tqdm to be installed. "
100 "Please install it with command: \n pip install tqdm")
101
102 self.pbar_cls = tqdm
103 self.pbar = None
104 self.persist = persist
105 self.bar_format = bar_format
106 self.tqdm_kwargs = tqdm_kwargs
107
108 def _reset(self, pbar_total):
109 self.pbar = self.pbar_cls(
110 total=pbar_total,
111 leave=self.persist,
112 bar_format=self.bar_format,
113 **self.tqdm_kwargs
114 )
115
116 def _close(self, engine):
117 self.pbar.close()
118 self.pbar = None
119
120 @staticmethod
121 def _compare_lt(event1, event2):
122 i1 = ProgressBar.events_order.index(event1)
123 i2 = ProgressBar.events_order.index(event2)
124 return i1 < i2
125
126 @staticmethod
127 def log_message(message):
128 """
129 Logs a message, preserving the progress bar correct output format.
130
131 Args:
132 message (str): string you wish to log.
133 """
134 from tqdm import tqdm
135 tqdm.write(message)
136
137 def attach(self, engine, metric_names=None, output_transform=None,
138 event_name=Events.ITERATION_COMPLETED,
139 closing_event_name=Events.EPOCH_COMPLETED):
140 """
141 Attaches the progress bar to an engine object.
142
143 Args:
144 engine (Engine): engine object.
145 metric_names (list, optional): list of the metrics names to log as the bar progresses
146 output_transform (callable, optional): a function to select what you want to print from the engine's
147 output. This function may return either a dictionary with entries in the format of ``{name: value}``,
148 or a single scalar, which will be displayed with the default name `output`.
149 event_name: event's name on which the progress bar advances. Valid events are from
150 :class:`~ignite.engine.Events`.
151 closing_event_name: event's name on which the progress bar is closed. Valid events are from
152 :class:`~ignite.engine.Events`.
153 """
154 desc = self.tqdm_kwargs.get("desc", "Epoch")
155
156 if not (event_name in Events and closing_event_name in Events):
157 raise ValueError("Logging and closing events should be only ignite.engine.Events")
158
159 if not self._compare_lt(event_name, closing_event_name):
160 raise ValueError("Logging event {} should be called before closing event {}"
161 .format(event_name, closing_event_name))
162
163 log_handler = _OutputHandler(desc, metric_names, output_transform,
164 event_name=event_name,
165 closing_event_name=closing_event_name)
166 super(ProgressBar, self).attach(engine, log_handler, event_name)
167 engine.add_event_handler(closing_event_name, self._close)
168
169
170 class _OutputHandler(BaseOutputHandler):
171 """Helper handler to log engine's output and/or metrics
172
173 Args:
174 description (str): progress bar description.
175 metric_names (list of str, optional): list of metric names to plot.
176 output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
177 For example, `output_transform = lambda output: output`
178 This function can also return a dictionary, e.g `{'loss': loss1, `another_loss`: loss2}` to label the plot
179 with corresponding keys.
180 event_name: event's name on which the progress bar advances. Valid events are from
181 :class:`~ignite.engine.Events` or any `event_name` added by
182 :meth:`~ignite.engine.Engine.register_events`.
183 closing_event_name: event's name on which the progress bar is closed. Valid events are from
184 :class:`~ignite.engine.Events` or any `event_name` added by
185 :meth:`~ignite.engine.Engine.register_events`.
186
187 """
188 def __init__(self, description, metric_names=None, output_transform=None,
189 event_name=Events.ITERATION_COMPLETED,
190 closing_event_name=Events.EPOCH_COMPLETED):
191 if metric_names is None and output_transform is None:
192 # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler
193 metric_names = []
194 super(_OutputHandler, self).__init__(description, metric_names, output_transform, another_engine=None)
195 self.event_name = event_name
196 self.closing_event_name = closing_event_name
197
198 @staticmethod
199 def get_max_number_events(event_name, engine):
200 if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):
201 return len(engine.state.dataloader)
202 if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):
203 return engine.state.max_epochs
204 return 1
205
206 def __call__(self, engine, logger, event_name):
207
208 if logger.pbar is None:
209 logger._reset(pbar_total=self.get_max_number_events(self.event_name, engine))
210
211 desc = self.tag
212 max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)
213 if max_num_of_closing_events > 1:
214 global_step = engine.state.get_event_attrib_value(self.closing_event_name)
215 desc += " [{}/{}]".format(global_step, max_num_of_closing_events)
216 logger.pbar.set_description(desc)
217
218 metrics = self._setup_output_metrics(engine)
219
220 rendered_metrics = {}
221 for key, value in metrics.items():
222 if isinstance(value, numbers.Number) or \
223 isinstance(value, torch.Tensor) and value.ndimension() == 0:
224 rendered_metrics[key] = "{:.2e}".format(value)
225 elif isinstance(value, torch.Tensor) and value.ndimension() == 1:
226 for i, v in enumerate(value):
227 k = "{}_{}".format(key, i)
228 rendered_metrics[k] = "{:.2e}".format(v)
229 else:
230 warnings.warn("ProgressBar can not log "
231 "metrics value type {}".format(type(value)))
232
233 if rendered_metrics:
234 logger.pbar.set_postfix(**rendered_metrics)
235
236 logger.pbar.update()
237
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py
--- a/ignite/contrib/handlers/tqdm_logger.py
+++ b/ignite/contrib/handlers/tqdm_logger.py
@@ -114,7 +114,8 @@
)
def _close(self, engine):
- self.pbar.close()
+ if self.pbar:
+ self.pbar.close()
self.pbar = None
@staticmethod
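
For context on the diff above: `Events.COMPLETED` can fire before any iteration has created the tqdm bar (exactly what happens when `TerminateOnNan` stops the run on the first iteration), so `_close` must tolerate `self.pbar` being `None`. A standalone sketch of that guard with a stand-in bar object; the class names below are illustrative, not ignite's.

```python
class FakeBar(object):
    """Stand-in for a tqdm progress bar."""

    def close(self):
        print("bar closed")


class BarOwner(object):
    def __init__(self):
        self.pbar = None  # created lazily, on the first logged iteration

    def _close(self, engine=None):
        # Mirrors the patch: closing before the bar exists is a no-op.
        if self.pbar:
            self.pbar.close()
        self.pbar = None


owner = BarOwner()
owner._close()          # no AttributeError even though no bar was created
owner.pbar = FakeBar()
owner._close()          # prints "bar closed"
```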
| {"golden_diff": "diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py\n--- a/ignite/contrib/handlers/tqdm_logger.py\n+++ b/ignite/contrib/handlers/tqdm_logger.py\n@@ -114,7 +114,8 @@\n )\n \n def _close(self, engine):\n- self.pbar.close()\n+ if self.pbar:\n+ self.pbar.close()\n self.pbar = None\n \n @staticmethod\n", "issue": "Bug with ProgressBar with TerminateOnNan\nIf we attach `ProgressBar` and `TerminateOnNaN` handlers to a trainer and `TerminateOnNan` stops training on the first iteration. We have the following error:\r\n\r\n```python\r\n---------------------------------------------------------------------------\r\nAttributeError Traceback (most recent call last)\r\n<ipython-input-36-b4ac10e6ccc4> in <module>\r\n----> 1 trainer.run(train_ab_loader, max_epochs=200)\r\n\r\n/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in run(self, data, max_epochs)\r\n 357 except BaseException as e:\r\n 358 self._logger.error(\"Engine run is terminating due to exception: %s.\", str(e))\r\n--> 359 self._handle_exception(e)\r\n 360 \r\n 361 return self.state\r\n\r\n/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in _handle_exception(self, e)\r\n 322 self._fire_event(Events.EXCEPTION_RAISED, e)\r\n 323 else:\r\n--> 324 raise e\r\n 325 \r\n 326 def run(self, data, max_epochs=1):\r\n\r\n/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in run(self, data, max_epochs)\r\n 350 self._fire_event(Events.EPOCH_COMPLETED)\r\n 351 \r\n--> 352 self._fire_event(Events.COMPLETED)\r\n 353 time_taken = time.time() - start_time\r\n 354 hours, mins, secs = _to_hours_mins_secs(time_taken)\r\n\r\n/opt/conda/lib/python3.7/site-packages/ignite/engine/engine.py in _fire_event(self, event_name, *event_args, **event_kwargs)\r\n 257 for func, args, kwargs in self._event_handlers[event_name]:\r\n 258 kwargs.update(event_kwargs)\r\n--> 259 func(self, *(event_args + args), **kwargs)\r\n 260 \r\n 261 def fire_event(self, event_name):\r\n\r\n/opt/conda/lib/python3.7/site-packages/ignite/contrib/handlers/tqdm_logger.py in _close(self, engine)\r\n 115 \r\n 116 def _close(self, engine):\r\n--> 117 self.pbar.close()\r\n 118 self.pbar = None\r\n 119 \r\n\r\nAttributeError: 'NoneType' object has no attribute 'close'\r\n```\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport numbers\nimport warnings\n\nimport torch\n\nfrom ignite.engine import Events\n\nfrom ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler\n\n\nclass ProgressBar(BaseLogger):\n \"\"\"\n TQDM progress bar handler to log training progress and computed metrics.\n\n Args:\n persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)\n bar_format (str, optional): Specify a custom bar string formatting. May impact performance.\n [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].\n Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the\n formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.\n **tqdm_kwargs: kwargs passed to tqdm progress bar.\n By default, progress bar description displays \"Epoch [5/10]\" where 5 is the current epoch and 10 is the\n number of epochs. If tqdm_kwargs defines `desc`, e.g. 
\"Predictions\", than the description is\n \"Predictions [5/10]\" if number of epochs is more than one otherwise it is simply \"Predictions\".\n\n Examples:\n\n Simple progress bar\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer)\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 [06:17<12:34]\n\n Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`\n (such as :class:`~ignite.metrics.RunningAverage`)\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')\n\n pbar = ProgressBar()\n pbar.attach(trainer, ['loss'])\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=12.34e-02 [06:17<12:34]\n\n Directly attach the engine's output\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer, output_transform=lambda x: {'loss': x})\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=12.34e-02 [06:17<12:34]\n\n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n every print operation in the engine's handlers triggered every iteration with\n ``pbar.log_message`` to guarantee the correct format of the stdout.\n\n Note:\n When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,\n please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.\n Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set\n to an empty string value.\n\n \"\"\"\n\n events_order = [\n Events.STARTED,\n Events.EPOCH_STARTED,\n Events.ITERATION_STARTED,\n Events.ITERATION_COMPLETED,\n Events.EPOCH_COMPLETED,\n Events.COMPLETED\n ]\n\n def __init__(self, persist=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',\n **tqdm_kwargs):\n\n try:\n from tqdm.autonotebook import tqdm\n except ImportError:\n raise RuntimeError(\"This contrib module requires tqdm to be installed. 
\"\n \"Please install it with command: \\n pip install tqdm\")\n\n self.pbar_cls = tqdm\n self.pbar = None\n self.persist = persist\n self.bar_format = bar_format\n self.tqdm_kwargs = tqdm_kwargs\n\n def _reset(self, pbar_total):\n self.pbar = self.pbar_cls(\n total=pbar_total,\n leave=self.persist,\n bar_format=self.bar_format,\n **self.tqdm_kwargs\n )\n\n def _close(self, engine):\n self.pbar.close()\n self.pbar = None\n\n @staticmethod\n def _compare_lt(event1, event2):\n i1 = ProgressBar.events_order.index(event1)\n i2 = ProgressBar.events_order.index(event2)\n return i1 < i2\n\n @staticmethod\n def log_message(message):\n \"\"\"\n Logs a message, preserving the progress bar correct output format.\n\n Args:\n message (str): string you wish to log.\n \"\"\"\n from tqdm import tqdm\n tqdm.write(message)\n\n def attach(self, engine, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n \"\"\"\n Attaches the progress bar to an engine object.\n\n Args:\n engine (Engine): engine object.\n metric_names (list, optional): list of the metrics names to log as the bar progresses\n output_transform (callable, optional): a function to select what you want to print from the engine's\n output. This function may return either a dictionary with entries in the format of ``{name: value}``,\n or a single scalar, which will be displayed with the default name `output`.\n event_name: event's name on which the progress bar advances. Valid events are from\n :class:`~ignite.engine.Events`.\n closing_event_name: event's name on which the progress bar is closed. Valid events are from\n :class:`~ignite.engine.Events`.\n \"\"\"\n desc = self.tqdm_kwargs.get(\"desc\", \"Epoch\")\n\n if not (event_name in Events and closing_event_name in Events):\n raise ValueError(\"Logging and closing events should be only ignite.engine.Events\")\n\n if not self._compare_lt(event_name, closing_event_name):\n raise ValueError(\"Logging event {} should be called before closing event {}\"\n .format(event_name, closing_event_name))\n\n log_handler = _OutputHandler(desc, metric_names, output_transform,\n event_name=event_name,\n closing_event_name=closing_event_name)\n super(ProgressBar, self).attach(engine, log_handler, event_name)\n engine.add_event_handler(closing_event_name, self._close)\n\n\nclass _OutputHandler(BaseOutputHandler):\n \"\"\"Helper handler to log engine's output and/or metrics\n\n Args:\n description (str): progress bar description.\n metric_names (list of str, optional): list of metric names to plot.\n output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.\n For example, `output_transform = lambda output: output`\n This function can also return a dictionary, e.g `{'loss': loss1, `another_loss`: loss2}` to label the plot\n with corresponding keys.\n event_name: event's name on which the progress bar advances. Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n closing_event_name: event's name on which the progress bar is closed. 
Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n\n \"\"\"\n def __init__(self, description, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n if metric_names is None and output_transform is None:\n # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler\n metric_names = []\n super(_OutputHandler, self).__init__(description, metric_names, output_transform, another_engine=None)\n self.event_name = event_name\n self.closing_event_name = closing_event_name\n\n @staticmethod\n def get_max_number_events(event_name, engine):\n if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):\n return len(engine.state.dataloader)\n if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):\n return engine.state.max_epochs\n return 1\n\n def __call__(self, engine, logger, event_name):\n\n if logger.pbar is None:\n logger._reset(pbar_total=self.get_max_number_events(self.event_name, engine))\n\n desc = self.tag\n max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)\n if max_num_of_closing_events > 1:\n global_step = engine.state.get_event_attrib_value(self.closing_event_name)\n desc += \" [{}/{}]\".format(global_step, max_num_of_closing_events)\n logger.pbar.set_description(desc)\n\n metrics = self._setup_output_metrics(engine)\n\n rendered_metrics = {}\n for key, value in metrics.items():\n if isinstance(value, numbers.Number) or \\\n isinstance(value, torch.Tensor) and value.ndimension() == 0:\n rendered_metrics[key] = \"{:.2e}\".format(value)\n elif isinstance(value, torch.Tensor) and value.ndimension() == 1:\n for i, v in enumerate(value):\n k = \"{}_{}\".format(key, i)\n rendered_metrics[k] = \"{:.2e}\".format(v)\n else:\n warnings.warn(\"ProgressBar can not log \"\n \"metrics value type {}\".format(type(value)))\n\n if rendered_metrics:\n logger.pbar.set_postfix(**rendered_metrics)\n\n logger.pbar.update()\n", "path": "ignite/contrib/handlers/tqdm_logger.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport numbers\nimport warnings\n\nimport torch\n\nfrom ignite.engine import Events\n\nfrom ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler\n\n\nclass ProgressBar(BaseLogger):\n \"\"\"\n TQDM progress bar handler to log training progress and computed metrics.\n\n Args:\n persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)\n bar_format (str, optional): Specify a custom bar string formatting. May impact performance.\n [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].\n Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the\n formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.\n **tqdm_kwargs: kwargs passed to tqdm progress bar.\n By default, progress bar description displays \"Epoch [5/10]\" where 5 is the current epoch and 10 is the\n number of epochs. If tqdm_kwargs defines `desc`, e.g. \"Predictions\", than the description is\n \"Predictions [5/10]\" if number of epochs is more than one otherwise it is simply \"Predictions\".\n\n Examples:\n\n Simple progress bar\n\n .. 
code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer)\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 [06:17<12:34]\n\n Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`\n (such as :class:`~ignite.metrics.RunningAverage`)\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')\n\n pbar = ProgressBar()\n pbar.attach(trainer, ['loss'])\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=12.34e-02 [06:17<12:34]\n\n Directly attach the engine's output\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer, output_transform=lambda x: {'loss': x})\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=12.34e-02 [06:17<12:34]\n\n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n every print operation in the engine's handlers triggered every iteration with\n ``pbar.log_message`` to guarantee the correct format of the stdout.\n\n Note:\n When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,\n please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.\n Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set\n to an empty string value.\n\n \"\"\"\n\n events_order = [\n Events.STARTED,\n Events.EPOCH_STARTED,\n Events.ITERATION_STARTED,\n Events.ITERATION_COMPLETED,\n Events.EPOCH_COMPLETED,\n Events.COMPLETED\n ]\n\n def __init__(self, persist=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',\n **tqdm_kwargs):\n\n try:\n from tqdm.autonotebook import tqdm\n except ImportError:\n raise RuntimeError(\"This contrib module requires tqdm to be installed. \"\n \"Please install it with command: \\n pip install tqdm\")\n\n self.pbar_cls = tqdm\n self.pbar = None\n self.persist = persist\n self.bar_format = bar_format\n self.tqdm_kwargs = tqdm_kwargs\n\n def _reset(self, pbar_total):\n self.pbar = self.pbar_cls(\n total=pbar_total,\n leave=self.persist,\n bar_format=self.bar_format,\n **self.tqdm_kwargs\n )\n\n def _close(self, engine):\n if self.pbar:\n self.pbar.close()\n self.pbar = None\n\n @staticmethod\n def _compare_lt(event1, event2):\n i1 = ProgressBar.events_order.index(event1)\n i2 = ProgressBar.events_order.index(event2)\n return i1 < i2\n\n @staticmethod\n def log_message(message):\n \"\"\"\n Logs a message, preserving the progress bar correct output format.\n\n Args:\n message (str): string you wish to log.\n \"\"\"\n from tqdm import tqdm\n tqdm.write(message)\n\n def attach(self, engine, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n \"\"\"\n Attaches the progress bar to an engine object.\n\n Args:\n engine (Engine): engine object.\n metric_names (list, optional): list of the metrics names to log as the bar progresses\n output_transform (callable, optional): a function to select what you want to print from the engine's\n output. 
This function may return either a dictionary with entries in the format of ``{name: value}``,\n or a single scalar, which will be displayed with the default name `output`.\n event_name: event's name on which the progress bar advances. Valid events are from\n :class:`~ignite.engine.Events`.\n closing_event_name: event's name on which the progress bar is closed. Valid events are from\n :class:`~ignite.engine.Events`.\n \"\"\"\n desc = self.tqdm_kwargs.get(\"desc\", \"Epoch\")\n\n if not (event_name in Events and closing_event_name in Events):\n raise ValueError(\"Logging and closing events should be only ignite.engine.Events\")\n\n if not self._compare_lt(event_name, closing_event_name):\n raise ValueError(\"Logging event {} should be called before closing event {}\"\n .format(event_name, closing_event_name))\n\n log_handler = _OutputHandler(desc, metric_names, output_transform,\n event_name=event_name,\n closing_event_name=closing_event_name)\n super(ProgressBar, self).attach(engine, log_handler, event_name)\n engine.add_event_handler(closing_event_name, self._close)\n\n\nclass _OutputHandler(BaseOutputHandler):\n \"\"\"Helper handler to log engine's output and/or metrics\n\n Args:\n description (str): progress bar description.\n metric_names (list of str, optional): list of metric names to plot.\n output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.\n For example, `output_transform = lambda output: output`\n This function can also return a dictionary, e.g `{'loss': loss1, `another_loss`: loss2}` to label the plot\n with corresponding keys.\n event_name: event's name on which the progress bar advances. Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n closing_event_name: event's name on which the progress bar is closed. 
Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n\n \"\"\"\n def __init__(self, description, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n if metric_names is None and output_transform is None:\n # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler\n metric_names = []\n super(_OutputHandler, self).__init__(description, metric_names, output_transform, another_engine=None)\n self.event_name = event_name\n self.closing_event_name = closing_event_name\n\n @staticmethod\n def get_max_number_events(event_name, engine):\n if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):\n return len(engine.state.dataloader)\n if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):\n return engine.state.max_epochs\n return 1\n\n def __call__(self, engine, logger, event_name):\n\n if logger.pbar is None:\n logger._reset(pbar_total=self.get_max_number_events(self.event_name, engine))\n\n desc = self.tag\n max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)\n if max_num_of_closing_events > 1:\n global_step = engine.state.get_event_attrib_value(self.closing_event_name)\n desc += \" [{}/{}]\".format(global_step, max_num_of_closing_events)\n logger.pbar.set_description(desc)\n\n metrics = self._setup_output_metrics(engine)\n\n rendered_metrics = {}\n for key, value in metrics.items():\n if isinstance(value, numbers.Number) or \\\n isinstance(value, torch.Tensor) and value.ndimension() == 0:\n rendered_metrics[key] = \"{:.2e}\".format(value)\n elif isinstance(value, torch.Tensor) and value.ndimension() == 1:\n for i, v in enumerate(value):\n k = \"{}_{}\".format(key, i)\n rendered_metrics[k] = \"{:.2e}\".format(v)\n else:\n warnings.warn(\"ProgressBar can not log \"\n \"metrics value type {}\".format(type(value)))\n\n if rendered_metrics:\n logger.pbar.set_postfix(**rendered_metrics)\n\n logger.pbar.update()\n", "path": "ignite/contrib/handlers/tqdm_logger.py"}]} | 3,693 | 117 |
gh_patches_debug_20930 | rasdani/github-patches | git_diff | wright-group__WrightTools-543 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
multiplication doesn't seem to work in d.transform
`d.transform('w2+w2+w2', 'w1', 'd2')` works.
`d.transform('3*w2', 'w1', 'd2')` does not work (even with varying spacing around the multiplication operator)
Part of the error that is raised:
```
File "/home/darien/source/WrightTools/WrightTools/data/_data.py", line 1306, in transform
axis = current.get(expression, Axis(self, expression))
File "/home/darien/source/WrightTools/WrightTools/data/_axis.py", line 53, in __init__
self.units = self.variables[0].units
File "/home/darien/source/WrightTools/WrightTools/data/_axis.py", line 152, in variables
return self._variables
AttributeError: 'Axis' object has no attribute '_variables'
```
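A minimal sketch of where the lookup falls over, using the operator set from `_axis.py` (shown below); this is an illustration, not the fix:

```python
import re

# Same operator string and split as Axis.variables in _axis.py
operators = '/=-+*'
pattern = '|'.join(map(re.escape, operators))
print(re.split(pattern, '3*w2'))  # ['3', 'w2']

# '3' is not in parent.variable_names, so variable_names.index('3') raises,
# self._variables is never assigned, and the `finally: return self._variables`
# clause then surfaces the AttributeError seen in the traceback above.
```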
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `WrightTools/data/_axis.py`
Content:
```
1 """Axis class and associated."""
2
3
4 # --- import --------------------------------------------------------------------------------------
5
6
7 import re
8 import numexpr
9 import operator
10 import functools
11
12 import numpy as np
13
14 from .. import exceptions as wt_exceptions
15 from .. import kit as wt_kit
16 from .. import units as wt_units
17
18
19 # --- define --------------------------------------------------------------------------------------
20
21
22 operator_to_identifier = {}
23 operator_to_identifier['/'] = '__d__'
24 operator_to_identifier['='] = '__e__'
25 operator_to_identifier['-'] = '__m__'
26 operator_to_identifier['+'] = '__p__'
27 operator_to_identifier['*'] = '__t__'
28 identifier_to_operator = {value: key for key, value in operator_to_identifier.items()}
29 operators = ''.join(operator_to_identifier.keys())
30
31
32 # --- class ---------------------------------------------------------------------------------------
33
34
35 class Axis(object):
36 """Axis class."""
37
38 def __init__(self, parent, expression, units=None):
39 """Data axis.
40
41 Parameters
42 ----------
43 parent : WrightTools.Data
44 Parent data object.
45 expression : string
46 Axis expression.
47 units : string (optional)
48 Axis units. Default is None.
49 """
50 self.parent = parent
51 self.expression = expression
52 if units is None:
53 self.units = self.variables[0].units
54 else:
55 self.units = units
56
57 def __getitem__(self, index):
58 vs = {}
59 for variable in self.variables:
60 arr = variable[index]
61 vs[variable.natural_name] = wt_units.converter(arr, variable.units, self.units)
62 return numexpr.evaluate(self.expression.split('=')[0], local_dict=vs)
63
64 def __repr__(self):
65 return '<WrightTools.Axis {0} ({1}) at {2}>'.format(self.expression, str(self.units),
66 id(self))
67
68 @property
69 def _leaf(self):
70 out = self.expression
71 if self.units is not None:
72 out += ' ({0}) {1}'.format(self.units, self.shape)
73 return out
74
75 @property
76 def full(self):
77 arr = self[:]
78 for i in range(arr.ndim):
79 if arr.shape[i] == 1:
80 arr = np.repeat(arr, self.parent.shape[i], axis=i)
81 return arr
82
83 @property
84 def identity(self):
85 return self.natural_name + ' {%s}' % self.units
86
87 @property
88 def label(self):
89 symbol = wt_units.get_symbol(self.units)
90 label = r'$\mathsf{' + self.expression
91 for v in self.variables:
92 label = label.replace(v.natural_name, '%s_{%s}' % (symbol, v.label))
93 if self.units_kind:
94 units_dictionary = getattr(wt_units, self.units_kind)
95 label += r'\,'
96 label += r'\left('
97 label += units_dictionary[self.units][2]
98 label += r'\right)'
99 else:
100 pass
101 label += r'}$'
102 return label
103
104 @property
105 def natural_name(self):
106 name = self.expression.strip()
107 for op in operators:
108 name = name.replace(op, operator_to_identifier[op])
109 return name
110
111 @property
112 def ndim(self):
113 """Get number of dimensions."""
114 try:
115 assert self._ndim is not None
116 except (AssertionError, AttributeError):
117 self._ndim = self.variables[0].ndim
118 finally:
119 return self._ndim
120
121 @property
122 def points(self):
123 """Squeezed array."""
124 return np.squeeze(self[:])
125
126 @property
127 def shape(self):
128 """Shape."""
129 return wt_kit.joint_shape(*self.variables)
130
131 @property
132 def size(self):
133 """Size."""
134 return functools.reduce(operator.mul, self.shape)
135
136 @property
137 def units_kind(self):
138 """Units kind."""
139 return wt_units.kind(self.units)
140
141 @property
142 def variables(self):
143 """Variables."""
144 try:
145 assert self._variables is not None
146 except (AssertionError, AttributeError):
147 pattern = '|'.join(map(re.escape, operators))
148 keys = re.split(pattern, self.expression)
149 indices = [self.parent.variable_names.index(key) for key in keys]
150 self._variables = [self.parent.variables[i] for i in indices]
151 finally:
152 return self._variables
153
154 def convert(self, destination_units, *, convert_variables=False):
155 """Convert axis to destination_units.
156
157 Parameters
158 ----------
159 destination_units : string
160 Destination units.
161 convert_variables : boolean (optional)
162 Toggle conversion of stored arrays. Default is False.
163 """
164 if not wt_units.is_valid_conversion(self.units, destination_units):
165 kind = wt_units.kind(self.units)
166 valid = list(wt_units.dicts[kind].keys())
167 raise wt_exceptions.UnitsError(valid, destination_units)
168 if convert_variables:
169 for v in self.variables:
170 v.convert(destination_units)
171 self.units = destination_units
172
173 def max(self):
174 """Axis max."""
175 return np.max(self[:])
176
177 def min(self):
178 """Axis min."""
179 return np.min(self[:])
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/WrightTools/data/_axis.py b/WrightTools/data/_axis.py
--- a/WrightTools/data/_axis.py
+++ b/WrightTools/data/_axis.py
@@ -106,7 +106,7 @@
name = self.expression.strip()
for op in operators:
name = name.replace(op, operator_to_identifier[op])
- return name
+ return wt_kit.string2identifier(name)
@property
def ndim(self):
@@ -146,7 +146,10 @@
except (AssertionError, AttributeError):
pattern = '|'.join(map(re.escape, operators))
keys = re.split(pattern, self.expression)
- indices = [self.parent.variable_names.index(key) for key in keys]
+ indices = []
+ for key in keys:
+ if key in self.parent.variable_names:
+ indices.append(self.parent.variable_names.index(key))
self._variables = [self.parent.variables[i] for i in indices]
finally:
return self._variables
| {"golden_diff": "diff --git a/WrightTools/data/_axis.py b/WrightTools/data/_axis.py\n--- a/WrightTools/data/_axis.py\n+++ b/WrightTools/data/_axis.py\n@@ -106,7 +106,7 @@\n name = self.expression.strip()\n for op in operators:\n name = name.replace(op, operator_to_identifier[op])\n- return name\n+ return wt_kit.string2identifier(name)\n \n @property\n def ndim(self):\n@@ -146,7 +146,10 @@\n except (AssertionError, AttributeError):\n pattern = '|'.join(map(re.escape, operators))\n keys = re.split(pattern, self.expression)\n- indices = [self.parent.variable_names.index(key) for key in keys]\n+ indices = []\n+ for key in keys:\n+ if key in self.parent.variable_names:\n+ indices.append(self.parent.variable_names.index(key))\n self._variables = [self.parent.variables[i] for i in indices]\n finally:\n return self._variables\n", "issue": "multiplication doesn't seem to work in d.transform\n`d.transform('w2+w2+w2', 'w1', 'd2')` works.\r\n\r\n`d.transform('3*w2', 'w1', 'd2')` does not work (even with varying spacing around the multiplication operator)\r\nPart of the error that is raised:\r\n```\r\n File \"/home/darien/source/WrightTools/WrightTools/data/_data.py\", line 1306, in transform\r\n axis = current.get(expression, Axis(self, expression))\r\n\r\n File \"/home/darien/source/WrightTools/WrightTools/data/_axis.py\", line 53, in __init__\r\n self.units = self.variables[0].units\r\n\r\n File \"/home/darien/source/WrightTools/WrightTools/data/_axis.py\", line 152, in variables\r\n return self._variables\r\n\r\nAttributeError: 'Axis' object has no attribute '_variables'\r\n```\n", "before_files": [{"content": "\"\"\"Axis class and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport re\nimport numexpr\nimport operator\nimport functools\n\nimport numpy as np\n\nfrom .. import exceptions as wt_exceptions\nfrom .. import kit as wt_kit\nfrom .. import units as wt_units\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\noperator_to_identifier = {}\noperator_to_identifier['/'] = '__d__'\noperator_to_identifier['='] = '__e__'\noperator_to_identifier['-'] = '__m__'\noperator_to_identifier['+'] = '__p__'\noperator_to_identifier['*'] = '__t__'\nidentifier_to_operator = {value: key for key, value in operator_to_identifier.items()}\noperators = ''.join(operator_to_identifier.keys())\n\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Axis(object):\n \"\"\"Axis class.\"\"\"\n\n def __init__(self, parent, expression, units=None):\n \"\"\"Data axis.\n\n Parameters\n ----------\n parent : WrightTools.Data\n Parent data object.\n expression : string\n Axis expression.\n units : string (optional)\n Axis units. 
Default is None.\n \"\"\"\n self.parent = parent\n self.expression = expression\n if units is None:\n self.units = self.variables[0].units\n else:\n self.units = units\n\n def __getitem__(self, index):\n vs = {}\n for variable in self.variables:\n arr = variable[index]\n vs[variable.natural_name] = wt_units.converter(arr, variable.units, self.units)\n return numexpr.evaluate(self.expression.split('=')[0], local_dict=vs)\n\n def __repr__(self):\n return '<WrightTools.Axis {0} ({1}) at {2}>'.format(self.expression, str(self.units),\n id(self))\n\n @property\n def _leaf(self):\n out = self.expression\n if self.units is not None:\n out += ' ({0}) {1}'.format(self.units, self.shape)\n return out\n\n @property\n def full(self):\n arr = self[:]\n for i in range(arr.ndim):\n if arr.shape[i] == 1:\n arr = np.repeat(arr, self.parent.shape[i], axis=i)\n return arr\n\n @property\n def identity(self):\n return self.natural_name + ' {%s}' % self.units\n\n @property\n def label(self):\n symbol = wt_units.get_symbol(self.units)\n label = r'$\\mathsf{' + self.expression\n for v in self.variables:\n label = label.replace(v.natural_name, '%s_{%s}' % (symbol, v.label))\n if self.units_kind:\n units_dictionary = getattr(wt_units, self.units_kind)\n label += r'\\,'\n label += r'\\left('\n label += units_dictionary[self.units][2]\n label += r'\\right)'\n else:\n pass\n label += r'}$'\n return label\n\n @property\n def natural_name(self):\n name = self.expression.strip()\n for op in operators:\n name = name.replace(op, operator_to_identifier[op])\n return name\n\n @property\n def ndim(self):\n \"\"\"Get number of dimensions.\"\"\"\n try:\n assert self._ndim is not None\n except (AssertionError, AttributeError):\n self._ndim = self.variables[0].ndim\n finally:\n return self._ndim\n\n @property\n def points(self):\n \"\"\"Squeezed array.\"\"\"\n return np.squeeze(self[:])\n\n @property\n def shape(self):\n \"\"\"Shape.\"\"\"\n return wt_kit.joint_shape(*self.variables)\n\n @property\n def size(self):\n \"\"\"Size.\"\"\"\n return functools.reduce(operator.mul, self.shape)\n\n @property\n def units_kind(self):\n \"\"\"Units kind.\"\"\"\n return wt_units.kind(self.units)\n\n @property\n def variables(self):\n \"\"\"Variables.\"\"\"\n try:\n assert self._variables is not None\n except (AssertionError, AttributeError):\n pattern = '|'.join(map(re.escape, operators))\n keys = re.split(pattern, self.expression)\n indices = [self.parent.variable_names.index(key) for key in keys]\n self._variables = [self.parent.variables[i] for i in indices]\n finally:\n return self._variables\n\n def convert(self, destination_units, *, convert_variables=False):\n \"\"\"Convert axis to destination_units.\n\n Parameters\n ----------\n destination_units : string\n Destination units.\n convert_variables : boolean (optional)\n Toggle conversion of stored arrays. 
Default is False.\n \"\"\"\n if not wt_units.is_valid_conversion(self.units, destination_units):\n kind = wt_units.kind(self.units)\n valid = list(wt_units.dicts[kind].keys())\n raise wt_exceptions.UnitsError(valid, destination_units)\n if convert_variables:\n for v in self.variables:\n v.convert(destination_units)\n self.units = destination_units\n\n def max(self):\n \"\"\"Axis max.\"\"\"\n return np.max(self[:])\n\n def min(self):\n \"\"\"Axis min.\"\"\"\n return np.min(self[:])\n", "path": "WrightTools/data/_axis.py"}], "after_files": [{"content": "\"\"\"Axis class and associated.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport re\nimport numexpr\nimport operator\nimport functools\n\nimport numpy as np\n\nfrom .. import exceptions as wt_exceptions\nfrom .. import kit as wt_kit\nfrom .. import units as wt_units\n\n\n# --- define --------------------------------------------------------------------------------------\n\n\noperator_to_identifier = {}\noperator_to_identifier['/'] = '__d__'\noperator_to_identifier['='] = '__e__'\noperator_to_identifier['-'] = '__m__'\noperator_to_identifier['+'] = '__p__'\noperator_to_identifier['*'] = '__t__'\nidentifier_to_operator = {value: key for key, value in operator_to_identifier.items()}\noperators = ''.join(operator_to_identifier.keys())\n\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Axis(object):\n \"\"\"Axis class.\"\"\"\n\n def __init__(self, parent, expression, units=None):\n \"\"\"Data axis.\n\n Parameters\n ----------\n parent : WrightTools.Data\n Parent data object.\n expression : string\n Axis expression.\n units : string (optional)\n Axis units. Default is None.\n \"\"\"\n self.parent = parent\n self.expression = expression\n if units is None:\n self.units = self.variables[0].units\n else:\n self.units = units\n\n def __getitem__(self, index):\n vs = {}\n for variable in self.variables:\n arr = variable[index]\n vs[variable.natural_name] = wt_units.converter(arr, variable.units, self.units)\n return numexpr.evaluate(self.expression.split('=')[0], local_dict=vs)\n\n def __repr__(self):\n return '<WrightTools.Axis {0} ({1}) at {2}>'.format(self.expression, str(self.units),\n id(self))\n\n @property\n def _leaf(self):\n out = self.expression\n if self.units is not None:\n out += ' ({0}) {1}'.format(self.units, self.shape)\n return out\n\n @property\n def full(self):\n arr = self[:]\n for i in range(arr.ndim):\n if arr.shape[i] == 1:\n arr = np.repeat(arr, self.parent.shape[i], axis=i)\n return arr\n\n @property\n def identity(self):\n return self.natural_name + ' {%s}' % self.units\n\n @property\n def label(self):\n symbol = wt_units.get_symbol(self.units)\n label = r'$\\mathsf{' + self.expression\n for v in self.variables:\n label = label.replace(v.natural_name, '%s_{%s}' % (symbol, v.label))\n if self.units_kind:\n units_dictionary = getattr(wt_units, self.units_kind)\n label += r'\\,'\n label += r'\\left('\n label += units_dictionary[self.units][2]\n label += r'\\right)'\n else:\n pass\n label += r'}$'\n return label\n\n @property\n def natural_name(self):\n name = self.expression.strip()\n for op in operators:\n name = name.replace(op, operator_to_identifier[op])\n return wt_kit.string2identifier(name)\n\n @property\n def ndim(self):\n \"\"\"Get number of dimensions.\"\"\"\n try:\n assert self._ndim is not None\n except (AssertionError, AttributeError):\n self._ndim = self.variables[0].ndim\n 
finally:\n return self._ndim\n\n @property\n def points(self):\n \"\"\"Squeezed array.\"\"\"\n return np.squeeze(self[:])\n\n @property\n def shape(self):\n \"\"\"Shape.\"\"\"\n return wt_kit.joint_shape(*self.variables)\n\n @property\n def size(self):\n \"\"\"Size.\"\"\"\n return functools.reduce(operator.mul, self.shape)\n\n @property\n def units_kind(self):\n \"\"\"Units kind.\"\"\"\n return wt_units.kind(self.units)\n\n @property\n def variables(self):\n \"\"\"Variables.\"\"\"\n try:\n assert self._variables is not None\n except (AssertionError, AttributeError):\n pattern = '|'.join(map(re.escape, operators))\n keys = re.split(pattern, self.expression)\n indices = []\n for key in keys:\n if key in self.parent.variable_names:\n indices.append(self.parent.variable_names.index(key))\n self._variables = [self.parent.variables[i] for i in indices]\n finally:\n return self._variables\n\n def convert(self, destination_units, *, convert_variables=False):\n \"\"\"Convert axis to destination_units.\n\n Parameters\n ----------\n destination_units : string\n Destination units.\n convert_variables : boolean (optional)\n Toggle conversion of stored arrays. Default is False.\n \"\"\"\n if not wt_units.is_valid_conversion(self.units, destination_units):\n kind = wt_units.kind(self.units)\n valid = list(wt_units.dicts[kind].keys())\n raise wt_exceptions.UnitsError(valid, destination_units)\n if convert_variables:\n for v in self.variables:\n v.convert(destination_units)\n self.units = destination_units\n\n def max(self):\n \"\"\"Axis max.\"\"\"\n return np.max(self[:])\n\n def min(self):\n \"\"\"Axis min.\"\"\"\n return np.min(self[:])\n", "path": "WrightTools/data/_axis.py"}]} | 1,995 | 226 |
gh_patches_debug_13642 | rasdani/github-patches | git_diff | mdn__kuma-6547 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
/ko/docs/Web/JavaScript/Reference/Global_Objects/Array/prototype
# Request type
<!-- Select the appropriate option -->
- [ ] Please close this issue, I accidentally submitted it without adding any details
- [ ] New documentation
- [x] Correction or update
# Details
I was trying to make it redirect to the main `Array` document, but I can't edit it. (404)
It seems that if the upstream document (`/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/prototype`) is removed, localized ones throw a 404 in `$edit`.
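A minimal sketch of the manager difference that appears to cause this, assuming the soft-delete behaviour described in the comments of `translate.py` (shown below):

```python
from django.shortcuts import get_object_or_404
from kuma.wiki.models import Document

def lookup_parent(parent_id):
    # Default manager: soft-deleted documents are hidden, so this raises
    # Http404 once the en-US parent page has been removed.
    return get_object_or_404(Document, id=parent_id)

def lookup_parent_including_deleted(parent_id):
    # Manager already used elsewhere in the same view; it still sees
    # soft-deleted rows, so the translation stays editable.
    return Document.all_objects.get(id=parent_id)
```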
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kuma/wiki/views/translate.py`
Content:
```
1 from urllib.parse import urlencode
2
3 from csp.decorators import csp_update
4 from django.conf import settings
5 from django.core.exceptions import ObjectDoesNotExist
6 from django.http import Http404, JsonResponse
7 from django.shortcuts import get_object_or_404, redirect, render
8 from django.utils.safestring import mark_safe
9 from django.utils.translation import ugettext_lazy as _
10 from django.views.decorators.cache import never_cache
11
12 import kuma.wiki.content
13 from kuma.attachments.forms import AttachmentRevisionForm
14 from kuma.core.decorators import block_user_agents, ensure_wiki_domain, login_required
15 from kuma.core.i18n import get_language_mapping
16 from kuma.core.urlresolvers import reverse
17 from kuma.core.utils import get_object_or_none, smart_int, urlparams
18
19 from .utils import document_form_initial, split_slug
20 from ..decorators import check_readonly, prevent_indexing, process_document_path
21 from ..forms import DocumentForm, RevisionForm
22 from ..models import Document, Revision
23
24
25 @ensure_wiki_domain
26 @never_cache
27 @block_user_agents
28 @login_required
29 @process_document_path
30 def select_locale(request, document_slug, document_locale):
31 """
32 Select a locale to translate the document to.
33 """
34 doc = get_object_or_404(Document, locale=document_locale, slug=document_slug)
35 return render(request, "wiki/select_locale.html", {"document": doc})
36
37
38 @ensure_wiki_domain
39 @never_cache
40 @block_user_agents
41 @login_required
42 @csp_update(SCRIPT_SRC="'unsafe-eval'") # Required until CKEditor 4.7
43 @process_document_path
44 @check_readonly
45 @prevent_indexing
46 def translate(request, document_slug, document_locale):
47 """
48 Create a new translation of a wiki document.
49
50 * document_slug is for the default locale
51 * translation is to the request locale
52 """
53 # TODO: Refactor this view into two views? (new, edit)
54 # That might help reduce the headache-inducing branchiness.
55
56 # The parent document to translate from
57 try:
58 # Use '.all_objects' because the parent might have been soft deleted.
59 # And if we don't respect that fact, it would become impossible to
60 # edit a the child of it.
61 parent_doc = Document.all_objects.get(
62 locale=settings.WIKI_DEFAULT_LANGUAGE, slug=document_slug
63 )
64 except Document.DoesNotExist:
65 raise Http404("Parent document does not exist")
66
67 # Get the mapping here and now so it can be used for input validation
68 language_mapping = get_language_mapping()
69
70 # HACK: Seems weird, but sticking the translate-to locale in a query
71 # param is the best way to avoid the MindTouch-legacy locale
72 # redirection logic.
73 document_locale = request.GET.get("tolocale", document_locale)
74 if document_locale.lower() not in language_mapping:
75 # The 'tolocale' query string parameters aren't free-text. They're
76 # explicitly listed on the "Select language" page (`...$locales`)
77 # If a locale was entered that wasn't a link it's a user bug.
78 raise Http404
79
80 # Set a "Discard Changes" page
81 discard_href = ""
82
83 if settings.WIKI_DEFAULT_LANGUAGE == document_locale:
84 # Don't translate to the default language.
85 return redirect(
86 reverse(
87 "wiki.edit",
88 locale=settings.WIKI_DEFAULT_LANGUAGE,
89 args=[parent_doc.slug],
90 )
91 )
92
93 if not parent_doc.is_localizable:
94 message = _("You cannot translate this document.")
95 context = {"message": message}
96 return render(request, "handlers/400.html", context, status=400)
97
98 based_on_rev = parent_doc.current_or_latest_revision()
99
100 disclose_description = bool(request.GET.get("opendescription"))
101
102 try:
103 doc = parent_doc.translations.get(locale=document_locale)
104 slug_dict = split_slug(doc.slug)
105 except Document.DoesNotExist:
106 doc = None
107 disclose_description = True
108 slug_dict = split_slug(document_slug)
109
110 # Find the "real" parent topic, which is its translation
111 if parent_doc.parent_topic:
112 try:
113 parent_topic_translated_doc = parent_doc.parent_topic.translations.get(
114 locale=document_locale
115 )
116 slug_dict = split_slug(
117 parent_topic_translated_doc.slug + "/" + slug_dict["specific"]
118 )
119 except ObjectDoesNotExist:
120 pass
121
122 user_has_doc_perm = (not doc) or (doc and doc.allows_editing_by(request.user))
123
124 doc_form = None
125 if user_has_doc_perm:
126 if doc:
127 # If there's an existing doc, populate form from it.
128 discard_href = doc.get_absolute_url()
129 doc.slug = slug_dict["specific"]
130 doc_initial = document_form_initial(doc)
131 else:
132 # If no existing doc, bring over the original title and slug.
133 discard_href = parent_doc.get_absolute_url()
134 doc_initial = {"title": based_on_rev.title, "slug": slug_dict["specific"]}
135 doc_form = DocumentForm(initial=doc_initial, parent_slug=slug_dict["parent"])
136
137 initial = {
138 "based_on": based_on_rev.id,
139 "current_rev": doc.current_or_latest_revision().id if doc else None,
140 "comment": "",
141 "toc_depth": based_on_rev.toc_depth,
142 "localization_tags": ["inprogress"],
143 }
144 content = None
145 if not doc:
146 content = based_on_rev.content
147 if content:
148 # TODO: There will be no need to "filterEditorSafety" when the code
149 # that calls "clean_content" on Revision.save is deployed to
150 # production, AND the current revisions of all docs have had
151 # their content cleaned with "clean_content".
152 initial.update(
153 content=kuma.wiki.content.parse(content).filterEditorSafety().serialize()
154 )
155 instance = doc and doc.current_or_latest_revision()
156 rev_form = RevisionForm(
157 request=request,
158 instance=instance,
159 initial=initial,
160 parent_slug=slug_dict["parent"],
161 )
162
163 if request.method == "POST":
164 which_form = request.POST.get("form-type", "both")
165 doc_form_invalid = False
166
167 # Grab the posted slug value in case it's invalid
168 posted_slug = request.POST.get("slug", slug_dict["specific"])
169
170 if user_has_doc_perm and which_form in ["doc", "both"]:
171 disclose_description = True
172 post_data = request.POST.copy()
173
174 post_data.update({"locale": document_locale})
175
176 doc_form = DocumentForm(
177 post_data, instance=doc, parent_slug=slug_dict["parent"]
178 )
179 doc_form.instance.locale = document_locale
180 doc_form.instance.parent = parent_doc
181
182 if which_form == "both":
183 # Sending a new copy of post so the slug change above
184 # doesn't cause problems during validation
185 rev_form = RevisionForm(
186 request=request, data=post_data, parent_slug=slug_dict["parent"]
187 )
188
189 # If we are submitting the whole form, we need to check that
190 # the Revision is valid before saving the Document.
191 if doc_form.is_valid() and (which_form == "doc" or rev_form.is_valid()):
192 doc = doc_form.save(parent=parent_doc)
193
194 if which_form == "doc":
195 url = urlparams(doc.get_edit_url(), opendescription=1)
196 return redirect(url)
197 else:
198 doc_form.data["slug"] = posted_slug
199 doc_form_invalid = True
200
201 if doc and which_form in ["rev", "both"]:
202 post_data = request.POST.copy()
203 if "slug" not in post_data:
204 post_data["slug"] = posted_slug
205
206 # update the post data with the toc_depth of original
207 post_data["toc_depth"] = based_on_rev.toc_depth
208
209 # Pass in the locale for the akistmet "blog_lang".
210 post_data["locale"] = document_locale
211
212 rev_form = RevisionForm(
213 request=request, data=post_data, parent_slug=slug_dict["parent"]
214 )
215 rev_form.instance.document = doc # for rev_form.clean()
216
217 if rev_form.is_valid() and not doc_form_invalid:
218 parent_id = request.POST.get("parent_id", "")
219
220 # Attempt to set a parent
221 if parent_id:
222 try:
223 parent_doc = get_object_or_404(Document, id=parent_id)
224 rev_form.instance.document.parent = parent_doc
225 doc.parent = parent_doc
226 rev_form.instance.based_on.document = doc.original
227 except Document.DoesNotExist:
228 pass
229
230 rev_form.save(doc)
231 # If this is an Ajax POST, then return a JsonResponse
232 if request.is_ajax():
233 data = {
234 "error": False,
235 "new_revision_id": rev_form.instance.id,
236 }
237
238 return JsonResponse(data)
239
240 # Construct the redirect URL, adding any needed parameters
241 url = doc.get_absolute_url()
242 params = {}
243 # Parameter for the document saved, so that we can delete the cached draft on load
244 params["rev_saved"] = request.POST.get("current_rev", "")
245 url = "%s?%s" % (url, urlencode(params))
246 return redirect(url)
247 else:
248 # If this is an Ajax POST, then return a JsonResponse with error
249 if request.is_ajax():
250 if "current_rev" in rev_form._errors:
251 # Make the error message safe so the '<' and '>' don't
252 # get turned into '<' and '>', respectively
253 rev_form.errors["current_rev"][0] = mark_safe(
254 rev_form.errors["current_rev"][0]
255 )
256 errors = [rev_form.errors[key][0] for key in rev_form.errors.keys()]
257 data = {
258 "error": True,
259 "error_message": errors,
260 "new_revision_id": rev_form.instance.id,
261 }
262 return JsonResponse(data=data)
263
264 if doc:
265 from_id = smart_int(request.GET.get("from"), None)
266 to_id = smart_int(request.GET.get("to"), None)
267
268 revision_from = get_object_or_none(Revision, pk=from_id, document=doc.parent)
269 revision_to = get_object_or_none(Revision, pk=to_id, document=doc.parent)
270 else:
271 revision_from = revision_to = None
272
273 parent_split = split_slug(parent_doc.slug)
274
275 language = language_mapping[document_locale.lower()]
276 default_locale = language_mapping[settings.WIKI_DEFAULT_LANGUAGE.lower()]
277
278 context = {
279 "parent": parent_doc,
280 "document": doc,
281 "document_form": doc_form,
282 "revision_form": rev_form,
283 "locale": document_locale,
284 "default_locale": default_locale,
285 "language": language,
286 "based_on": based_on_rev,
287 "disclose_description": disclose_description,
288 "discard_href": discard_href,
289 "attachment_form": AttachmentRevisionForm(),
290 "specific_slug": parent_split["specific"],
291 "parent_slug": parent_split["parent"],
292 "revision_from": revision_from,
293 "revision_to": revision_to,
294 }
295 return render(request, "wiki/translate.html", context)
296
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kuma/wiki/views/translate.py b/kuma/wiki/views/translate.py
--- a/kuma/wiki/views/translate.py
+++ b/kuma/wiki/views/translate.py
@@ -220,7 +220,10 @@
# Attempt to set a parent
if parent_id:
try:
- parent_doc = get_object_or_404(Document, id=parent_id)
+ try:
+ parent_doc = Document.all_objects.get(id=parent_id)
+ except Document.DoesNotExist:
+ raise Http404("Parent document does not exist")
rev_form.instance.document.parent = parent_doc
doc.parent = parent_doc
rev_form.instance.based_on.document = doc.original
| {"golden_diff": "diff --git a/kuma/wiki/views/translate.py b/kuma/wiki/views/translate.py\n--- a/kuma/wiki/views/translate.py\n+++ b/kuma/wiki/views/translate.py\n@@ -220,7 +220,10 @@\n # Attempt to set a parent\n if parent_id:\n try:\n- parent_doc = get_object_or_404(Document, id=parent_id)\n+ try:\n+ parent_doc = Document.all_objects.get(id=parent_id)\n+ except Document.DoesNotExist:\n+ raise Http404(\"Parent document does not exist\")\n rev_form.instance.document.parent = parent_doc\n doc.parent = parent_doc\n rev_form.instance.based_on.document = doc.original\n", "issue": "/ko/docs/Web/JavaScript/Reference/Global_Objects/Array/prototype\n# Request type\r\n<!-- Select the appropriate option -->\r\n- [ ] Please close this issue, I accidentally submitted it without adding any details\r\n- [ ] New documentation\r\n- [x] Correction or update\r\n\r\n\r\n# Details\r\nI was trying to make it redirect to the main `Array` document, but I can't edit it. (404)\r\nSeems like that if the upstream document (`/en-US/docs/Web/JavaScript/Reference/Global_Objects/Array/prototype`) is removed, localized ones throw 404 in `$edit`.\n", "before_files": [{"content": "from urllib.parse import urlencode\n\nfrom csp.decorators import csp_update\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import Http404, JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.cache import never_cache\n\nimport kuma.wiki.content\nfrom kuma.attachments.forms import AttachmentRevisionForm\nfrom kuma.core.decorators import block_user_agents, ensure_wiki_domain, login_required\nfrom kuma.core.i18n import get_language_mapping\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import get_object_or_none, smart_int, urlparams\n\nfrom .utils import document_form_initial, split_slug\nfrom ..decorators import check_readonly, prevent_indexing, process_document_path\nfrom ..forms import DocumentForm, RevisionForm\nfrom ..models import Document, Revision\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@process_document_path\ndef select_locale(request, document_slug, document_locale):\n \"\"\"\n Select a locale to translate the document to.\n \"\"\"\n doc = get_object_or_404(Document, locale=document_locale, slug=document_slug)\n return render(request, \"wiki/select_locale.html\", {\"document\": doc})\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@csp_update(SCRIPT_SRC=\"'unsafe-eval'\") # Required until CKEditor 4.7\n@process_document_path\n@check_readonly\n@prevent_indexing\ndef translate(request, document_slug, document_locale):\n \"\"\"\n Create a new translation of a wiki document.\n\n * document_slug is for the default locale\n * translation is to the request locale\n \"\"\"\n # TODO: Refactor this view into two views? 
(new, edit)\n # That might help reduce the headache-inducing branchiness.\n\n # The parent document to translate from\n try:\n # Use '.all_objects' because the parent might have been soft deleted.\n # And if we don't respect that fact, it would become impossible to\n # edit a the child of it.\n parent_doc = Document.all_objects.get(\n locale=settings.WIKI_DEFAULT_LANGUAGE, slug=document_slug\n )\n except Document.DoesNotExist:\n raise Http404(\"Parent document does not exist\")\n\n # Get the mapping here and now so it can be used for input validation\n language_mapping = get_language_mapping()\n\n # HACK: Seems weird, but sticking the translate-to locale in a query\n # param is the best way to avoid the MindTouch-legacy locale\n # redirection logic.\n document_locale = request.GET.get(\"tolocale\", document_locale)\n if document_locale.lower() not in language_mapping:\n # The 'tolocale' query string parameters aren't free-text. They're\n # explicitly listed on the \"Select language\" page (`...$locales`)\n # If a locale was entered that wasn't a link it's a user bug.\n raise Http404\n\n # Set a \"Discard Changes\" page\n discard_href = \"\"\n\n if settings.WIKI_DEFAULT_LANGUAGE == document_locale:\n # Don't translate to the default language.\n return redirect(\n reverse(\n \"wiki.edit\",\n locale=settings.WIKI_DEFAULT_LANGUAGE,\n args=[parent_doc.slug],\n )\n )\n\n if not parent_doc.is_localizable:\n message = _(\"You cannot translate this document.\")\n context = {\"message\": message}\n return render(request, \"handlers/400.html\", context, status=400)\n\n based_on_rev = parent_doc.current_or_latest_revision()\n\n disclose_description = bool(request.GET.get(\"opendescription\"))\n\n try:\n doc = parent_doc.translations.get(locale=document_locale)\n slug_dict = split_slug(doc.slug)\n except Document.DoesNotExist:\n doc = None\n disclose_description = True\n slug_dict = split_slug(document_slug)\n\n # Find the \"real\" parent topic, which is its translation\n if parent_doc.parent_topic:\n try:\n parent_topic_translated_doc = parent_doc.parent_topic.translations.get(\n locale=document_locale\n )\n slug_dict = split_slug(\n parent_topic_translated_doc.slug + \"/\" + slug_dict[\"specific\"]\n )\n except ObjectDoesNotExist:\n pass\n\n user_has_doc_perm = (not doc) or (doc and doc.allows_editing_by(request.user))\n\n doc_form = None\n if user_has_doc_perm:\n if doc:\n # If there's an existing doc, populate form from it.\n discard_href = doc.get_absolute_url()\n doc.slug = slug_dict[\"specific\"]\n doc_initial = document_form_initial(doc)\n else:\n # If no existing doc, bring over the original title and slug.\n discard_href = parent_doc.get_absolute_url()\n doc_initial = {\"title\": based_on_rev.title, \"slug\": slug_dict[\"specific\"]}\n doc_form = DocumentForm(initial=doc_initial, parent_slug=slug_dict[\"parent\"])\n\n initial = {\n \"based_on\": based_on_rev.id,\n \"current_rev\": doc.current_or_latest_revision().id if doc else None,\n \"comment\": \"\",\n \"toc_depth\": based_on_rev.toc_depth,\n \"localization_tags\": [\"inprogress\"],\n }\n content = None\n if not doc:\n content = based_on_rev.content\n if content:\n # TODO: There will be no need to \"filterEditorSafety\" when the code\n # that calls \"clean_content\" on Revision.save is deployed to\n # production, AND the current revisions of all docs have had\n # their content cleaned with \"clean_content\".\n initial.update(\n content=kuma.wiki.content.parse(content).filterEditorSafety().serialize()\n )\n instance = doc and 
doc.current_or_latest_revision()\n rev_form = RevisionForm(\n request=request,\n instance=instance,\n initial=initial,\n parent_slug=slug_dict[\"parent\"],\n )\n\n if request.method == \"POST\":\n which_form = request.POST.get(\"form-type\", \"both\")\n doc_form_invalid = False\n\n # Grab the posted slug value in case it's invalid\n posted_slug = request.POST.get(\"slug\", slug_dict[\"specific\"])\n\n if user_has_doc_perm and which_form in [\"doc\", \"both\"]:\n disclose_description = True\n post_data = request.POST.copy()\n\n post_data.update({\"locale\": document_locale})\n\n doc_form = DocumentForm(\n post_data, instance=doc, parent_slug=slug_dict[\"parent\"]\n )\n doc_form.instance.locale = document_locale\n doc_form.instance.parent = parent_doc\n\n if which_form == \"both\":\n # Sending a new copy of post so the slug change above\n # doesn't cause problems during validation\n rev_form = RevisionForm(\n request=request, data=post_data, parent_slug=slug_dict[\"parent\"]\n )\n\n # If we are submitting the whole form, we need to check that\n # the Revision is valid before saving the Document.\n if doc_form.is_valid() and (which_form == \"doc\" or rev_form.is_valid()):\n doc = doc_form.save(parent=parent_doc)\n\n if which_form == \"doc\":\n url = urlparams(doc.get_edit_url(), opendescription=1)\n return redirect(url)\n else:\n doc_form.data[\"slug\"] = posted_slug\n doc_form_invalid = True\n\n if doc and which_form in [\"rev\", \"both\"]:\n post_data = request.POST.copy()\n if \"slug\" not in post_data:\n post_data[\"slug\"] = posted_slug\n\n # update the post data with the toc_depth of original\n post_data[\"toc_depth\"] = based_on_rev.toc_depth\n\n # Pass in the locale for the akistmet \"blog_lang\".\n post_data[\"locale\"] = document_locale\n\n rev_form = RevisionForm(\n request=request, data=post_data, parent_slug=slug_dict[\"parent\"]\n )\n rev_form.instance.document = doc # for rev_form.clean()\n\n if rev_form.is_valid() and not doc_form_invalid:\n parent_id = request.POST.get(\"parent_id\", \"\")\n\n # Attempt to set a parent\n if parent_id:\n try:\n parent_doc = get_object_or_404(Document, id=parent_id)\n rev_form.instance.document.parent = parent_doc\n doc.parent = parent_doc\n rev_form.instance.based_on.document = doc.original\n except Document.DoesNotExist:\n pass\n\n rev_form.save(doc)\n # If this is an Ajax POST, then return a JsonResponse\n if request.is_ajax():\n data = {\n \"error\": False,\n \"new_revision_id\": rev_form.instance.id,\n }\n\n return JsonResponse(data)\n\n # Construct the redirect URL, adding any needed parameters\n url = doc.get_absolute_url()\n params = {}\n # Parameter for the document saved, so that we can delete the cached draft on load\n params[\"rev_saved\"] = request.POST.get(\"current_rev\", \"\")\n url = \"%s?%s\" % (url, urlencode(params))\n return redirect(url)\n else:\n # If this is an Ajax POST, then return a JsonResponse with error\n if request.is_ajax():\n if \"current_rev\" in rev_form._errors:\n # Make the error message safe so the '<' and '>' don't\n # get turned into '<' and '>', respectively\n rev_form.errors[\"current_rev\"][0] = mark_safe(\n rev_form.errors[\"current_rev\"][0]\n )\n errors = [rev_form.errors[key][0] for key in rev_form.errors.keys()]\n data = {\n \"error\": True,\n \"error_message\": errors,\n \"new_revision_id\": rev_form.instance.id,\n }\n return JsonResponse(data=data)\n\n if doc:\n from_id = smart_int(request.GET.get(\"from\"), None)\n to_id = smart_int(request.GET.get(\"to\"), None)\n\n revision_from = 
get_object_or_none(Revision, pk=from_id, document=doc.parent)\n revision_to = get_object_or_none(Revision, pk=to_id, document=doc.parent)\n else:\n revision_from = revision_to = None\n\n parent_split = split_slug(parent_doc.slug)\n\n language = language_mapping[document_locale.lower()]\n default_locale = language_mapping[settings.WIKI_DEFAULT_LANGUAGE.lower()]\n\n context = {\n \"parent\": parent_doc,\n \"document\": doc,\n \"document_form\": doc_form,\n \"revision_form\": rev_form,\n \"locale\": document_locale,\n \"default_locale\": default_locale,\n \"language\": language,\n \"based_on\": based_on_rev,\n \"disclose_description\": disclose_description,\n \"discard_href\": discard_href,\n \"attachment_form\": AttachmentRevisionForm(),\n \"specific_slug\": parent_split[\"specific\"],\n \"parent_slug\": parent_split[\"parent\"],\n \"revision_from\": revision_from,\n \"revision_to\": revision_to,\n }\n return render(request, \"wiki/translate.html\", context)\n", "path": "kuma/wiki/views/translate.py"}], "after_files": [{"content": "from urllib.parse import urlencode\n\nfrom csp.decorators import csp_update\nfrom django.conf import settings\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.http import Http404, JsonResponse\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.safestring import mark_safe\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.decorators.cache import never_cache\n\nimport kuma.wiki.content\nfrom kuma.attachments.forms import AttachmentRevisionForm\nfrom kuma.core.decorators import block_user_agents, ensure_wiki_domain, login_required\nfrom kuma.core.i18n import get_language_mapping\nfrom kuma.core.urlresolvers import reverse\nfrom kuma.core.utils import get_object_or_none, smart_int, urlparams\n\nfrom .utils import document_form_initial, split_slug\nfrom ..decorators import check_readonly, prevent_indexing, process_document_path\nfrom ..forms import DocumentForm, RevisionForm\nfrom ..models import Document, Revision\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@process_document_path\ndef select_locale(request, document_slug, document_locale):\n \"\"\"\n Select a locale to translate the document to.\n \"\"\"\n doc = get_object_or_404(Document, locale=document_locale, slug=document_slug)\n return render(request, \"wiki/select_locale.html\", {\"document\": doc})\n\n\n@ensure_wiki_domain\n@never_cache\n@block_user_agents\n@login_required\n@csp_update(SCRIPT_SRC=\"'unsafe-eval'\") # Required until CKEditor 4.7\n@process_document_path\n@check_readonly\n@prevent_indexing\ndef translate(request, document_slug, document_locale):\n \"\"\"\n Create a new translation of a wiki document.\n\n * document_slug is for the default locale\n * translation is to the request locale\n \"\"\"\n # TODO: Refactor this view into two views? 
(new, edit)\n # That might help reduce the headache-inducing branchiness.\n\n # The parent document to translate from\n try:\n # Use '.all_objects' because the parent might have been soft deleted.\n # And if we don't respect that fact, it would become impossible to\n # edit a the child of it.\n parent_doc = Document.all_objects.get(\n locale=settings.WIKI_DEFAULT_LANGUAGE, slug=document_slug\n )\n except Document.DoesNotExist:\n raise Http404(\"Parent document does not exist\")\n\n # Get the mapping here and now so it can be used for input validation\n language_mapping = get_language_mapping()\n\n # HACK: Seems weird, but sticking the translate-to locale in a query\n # param is the best way to avoid the MindTouch-legacy locale\n # redirection logic.\n document_locale = request.GET.get(\"tolocale\", document_locale)\n if document_locale.lower() not in language_mapping:\n # The 'tolocale' query string parameters aren't free-text. They're\n # explicitly listed on the \"Select language\" page (`...$locales`)\n # If a locale was entered that wasn't a link it's a user bug.\n raise Http404\n\n # Set a \"Discard Changes\" page\n discard_href = \"\"\n\n if settings.WIKI_DEFAULT_LANGUAGE == document_locale:\n # Don't translate to the default language.\n return redirect(\n reverse(\n \"wiki.edit\",\n locale=settings.WIKI_DEFAULT_LANGUAGE,\n args=[parent_doc.slug],\n )\n )\n\n if not parent_doc.is_localizable:\n message = _(\"You cannot translate this document.\")\n context = {\"message\": message}\n return render(request, \"handlers/400.html\", context, status=400)\n\n based_on_rev = parent_doc.current_or_latest_revision()\n\n disclose_description = bool(request.GET.get(\"opendescription\"))\n\n try:\n doc = parent_doc.translations.get(locale=document_locale)\n slug_dict = split_slug(doc.slug)\n except Document.DoesNotExist:\n doc = None\n disclose_description = True\n slug_dict = split_slug(document_slug)\n\n # Find the \"real\" parent topic, which is its translation\n if parent_doc.parent_topic:\n try:\n parent_topic_translated_doc = parent_doc.parent_topic.translations.get(\n locale=document_locale\n )\n slug_dict = split_slug(\n parent_topic_translated_doc.slug + \"/\" + slug_dict[\"specific\"]\n )\n except ObjectDoesNotExist:\n pass\n\n user_has_doc_perm = (not doc) or (doc and doc.allows_editing_by(request.user))\n\n doc_form = None\n if user_has_doc_perm:\n if doc:\n # If there's an existing doc, populate form from it.\n discard_href = doc.get_absolute_url()\n doc.slug = slug_dict[\"specific\"]\n doc_initial = document_form_initial(doc)\n else:\n # If no existing doc, bring over the original title and slug.\n discard_href = parent_doc.get_absolute_url()\n doc_initial = {\"title\": based_on_rev.title, \"slug\": slug_dict[\"specific\"]}\n doc_form = DocumentForm(initial=doc_initial, parent_slug=slug_dict[\"parent\"])\n\n initial = {\n \"based_on\": based_on_rev.id,\n \"current_rev\": doc.current_or_latest_revision().id if doc else None,\n \"comment\": \"\",\n \"toc_depth\": based_on_rev.toc_depth,\n \"localization_tags\": [\"inprogress\"],\n }\n content = None\n if not doc:\n content = based_on_rev.content\n if content:\n # TODO: There will be no need to \"filterEditorSafety\" when the code\n # that calls \"clean_content\" on Revision.save is deployed to\n # production, AND the current revisions of all docs have had\n # their content cleaned with \"clean_content\".\n initial.update(\n content=kuma.wiki.content.parse(content).filterEditorSafety().serialize()\n )\n instance = doc and 
doc.current_or_latest_revision()\n rev_form = RevisionForm(\n request=request,\n instance=instance,\n initial=initial,\n parent_slug=slug_dict[\"parent\"],\n )\n\n if request.method == \"POST\":\n which_form = request.POST.get(\"form-type\", \"both\")\n doc_form_invalid = False\n\n # Grab the posted slug value in case it's invalid\n posted_slug = request.POST.get(\"slug\", slug_dict[\"specific\"])\n\n if user_has_doc_perm and which_form in [\"doc\", \"both\"]:\n disclose_description = True\n post_data = request.POST.copy()\n\n post_data.update({\"locale\": document_locale})\n\n doc_form = DocumentForm(\n post_data, instance=doc, parent_slug=slug_dict[\"parent\"]\n )\n doc_form.instance.locale = document_locale\n doc_form.instance.parent = parent_doc\n\n if which_form == \"both\":\n # Sending a new copy of post so the slug change above\n # doesn't cause problems during validation\n rev_form = RevisionForm(\n request=request, data=post_data, parent_slug=slug_dict[\"parent\"]\n )\n\n # If we are submitting the whole form, we need to check that\n # the Revision is valid before saving the Document.\n if doc_form.is_valid() and (which_form == \"doc\" or rev_form.is_valid()):\n doc = doc_form.save(parent=parent_doc)\n\n if which_form == \"doc\":\n url = urlparams(doc.get_edit_url(), opendescription=1)\n return redirect(url)\n else:\n doc_form.data[\"slug\"] = posted_slug\n doc_form_invalid = True\n\n if doc and which_form in [\"rev\", \"both\"]:\n post_data = request.POST.copy()\n if \"slug\" not in post_data:\n post_data[\"slug\"] = posted_slug\n\n # update the post data with the toc_depth of original\n post_data[\"toc_depth\"] = based_on_rev.toc_depth\n\n # Pass in the locale for the akistmet \"blog_lang\".\n post_data[\"locale\"] = document_locale\n\n rev_form = RevisionForm(\n request=request, data=post_data, parent_slug=slug_dict[\"parent\"]\n )\n rev_form.instance.document = doc # for rev_form.clean()\n\n if rev_form.is_valid() and not doc_form_invalid:\n parent_id = request.POST.get(\"parent_id\", \"\")\n\n # Attempt to set a parent\n if parent_id:\n try:\n try:\n parent_doc = Document.all_objects.get(id=parent_id)\n except Document.DoesNotExist:\n raise Http404(\"Parent document does not exist\")\n rev_form.instance.document.parent = parent_doc\n doc.parent = parent_doc\n rev_form.instance.based_on.document = doc.original\n except Document.DoesNotExist:\n pass\n\n rev_form.save(doc)\n # If this is an Ajax POST, then return a JsonResponse\n if request.is_ajax():\n data = {\n \"error\": False,\n \"new_revision_id\": rev_form.instance.id,\n }\n\n return JsonResponse(data)\n\n # Construct the redirect URL, adding any needed parameters\n url = doc.get_absolute_url()\n params = {}\n # Parameter for the document saved, so that we can delete the cached draft on load\n params[\"rev_saved\"] = request.POST.get(\"current_rev\", \"\")\n url = \"%s?%s\" % (url, urlencode(params))\n return redirect(url)\n else:\n # If this is an Ajax POST, then return a JsonResponse with error\n if request.is_ajax():\n if \"current_rev\" in rev_form._errors:\n # Make the error message safe so the '<' and '>' don't\n # get turned into '<' and '>', respectively\n rev_form.errors[\"current_rev\"][0] = mark_safe(\n rev_form.errors[\"current_rev\"][0]\n )\n errors = [rev_form.errors[key][0] for key in rev_form.errors.keys()]\n data = {\n \"error\": True,\n \"error_message\": errors,\n \"new_revision_id\": rev_form.instance.id,\n }\n return JsonResponse(data=data)\n\n if doc:\n from_id = smart_int(request.GET.get(\"from\"), 
None)\n to_id = smart_int(request.GET.get(\"to\"), None)\n\n revision_from = get_object_or_none(Revision, pk=from_id, document=doc.parent)\n revision_to = get_object_or_none(Revision, pk=to_id, document=doc.parent)\n else:\n revision_from = revision_to = None\n\n parent_split = split_slug(parent_doc.slug)\n\n language = language_mapping[document_locale.lower()]\n default_locale = language_mapping[settings.WIKI_DEFAULT_LANGUAGE.lower()]\n\n context = {\n \"parent\": parent_doc,\n \"document\": doc,\n \"document_form\": doc_form,\n \"revision_form\": rev_form,\n \"locale\": document_locale,\n \"default_locale\": default_locale,\n \"language\": language,\n \"based_on\": based_on_rev,\n \"disclose_description\": disclose_description,\n \"discard_href\": discard_href,\n \"attachment_form\": AttachmentRevisionForm(),\n \"specific_slug\": parent_split[\"specific\"],\n \"parent_slug\": parent_split[\"parent\"],\n \"revision_from\": revision_from,\n \"revision_to\": revision_to,\n }\n return render(request, \"wiki/translate.html\", context)\n", "path": "kuma/wiki/views/translate.py"}]} | 3,583 | 157 |
gh_patches_debug_25133 | rasdani/github-patches | git_diff | fedora-infra__bodhi-3634 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reset a multi-build update that failed merging a side tag back to pending
If a multi-build update fails to be merged into stable, the approve-testing cron job will add a new comment to the update every time it runs.
To avoid this we need to reset the update status to pending, but we also need to be able to move the update back to testing when it is edited (a build added or removed).
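A rough sketch of the proposed flow — the status values exist in `bodhi.server.models`, but the trigger points here are assumptions for illustration only, not the actual implementation:

```python
from bodhi.server.models import UpdateStatus

def on_side_tag_merge_failure(update):
    # Park the update so the approve-testing job (which only looks at
    # updates in testing) stops adding comments to it.
    update.status = UpdateStatus.pending

def on_update_edit(update):
    # Once builds are added or removed, let testing feedback resume.
    if update.status == UpdateStatus.pending:
        update.status = UpdateStatus.testing
```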
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bodhi/server/tasks/updates.py`
Content:
```
1 # Copyright 2015-2019 Red Hat Inc., and others.
2 #
3 # This file is part of Bodhi.
4 #
5 # This program is free software; you can redistribute it and/or
6 # modify it under the terms of the GNU General Public License
7 # as published by the Free Software Foundation; either version 2
8 # of the License, or (at your option) any later version.
9 #
10 # This program is distributed in the hope that it will be useful,
11 # but WITHOUT ANY WARRANTY; without even the implied warranty of
12 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 # GNU General Public License for more details.
14 #
15 # You should have received a copy of the GNU General Public License along with
16 # this program; if not, write to the Free Software Foundation, Inc., 51
17 # Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
18 """
19 The "updates handler".
20
21 This module is responsible for doing value-added work "offline" that used to be
22 done when updates were submitted. Specifically, when someone submits an update
23 we used to:
24
25 - Update any bugs in bugzilla associated with the update.
26 - Check for test cases in the wiki.
27
28 Those things could sometimes take a *very* long time, especially if there were
29 lots of builds and lots of bugs in the update.
30
31 Now, update-submission breezes by those steps and simply tells the user "OK".
32 A message gets published when their update goes through, and *that* message
33 gets received here and triggers us to do all that network-laden heavy lifting.
34 """
35
36 import logging
37 import time
38
39 from bodhi.server import util, bugs as bug_module
40 from bodhi.server.config import config
41 from bodhi.server.exceptions import BodhiException
42 from bodhi.server.models import Bug, Update, UpdateType
43
44
45 log = logging.getLogger('bodhi')
46
47
48 class UpdatesHandler:
49 """
50 Perform background tasks when updates are created or edited.
51
52 This Celery task is run when an update is created or editied in the frontend,
53 and performs background tasks such as modifying Bugzilla issues (and loading information from
54 Bugzilla so we can display it to the user) and looking up wiki test cases.
55
56 Attributes:
57 db_factory (bodhi.server.util.TransactionalSessionMaker): A context manager that yields a
58 database session.
59 handle_bugs (bool): If True, interact with Bugzilla. Else do not.
60 """
61
62 def __init__(self, *args, **kwargs):
63 """Initialize the UpdatesHandler."""
64 self.db_factory = util.transactional_session_maker()
65
66 self.handle_bugs = bool(config.get('bodhi_email'))
67 if not self.handle_bugs:
68 log.warning("No bodhi_email defined; not fetching bug details")
69
70 def run(self, api_version: int, data: dict):
71 """
72 Process the given message, updating relevant bugs and test cases.
73
74 Duplicate messages: if the server delivers the message multiple times,
75 the bugs and test cases are simply re-fetched and updated, so nothing
76 bad happens.
77
78 Args:
79 api_version: API version number.
80 data: Information about a new or edited update.
81 """
82 action = data["action"]
83 alias = data['update'].get('alias')
84
85 log.info("Updates Handler handling %s, %s" % (alias, action))
86
87 # Go to sleep for a second to try and avoid a race condition
88 # https://github.com/fedora-infra/bodhi/issues/458
89 time.sleep(1)
90
91 with self.db_factory() as session:
92 update = Update.get(alias)
93 if not update:
94 raise BodhiException("Couldn't find alias '%s' in DB" % alias)
95
96 bugs = []
97 if action == "edit":
98 for idx in data['new_bugs']:
99 bug = Bug.get(idx)
100
101 # Sanity check
102 if bug is None or bug not in update.bugs:
103 update_bugs_ids = [b.bug_id for b in update.bugs]
104 update.update_bugs(update_bugs_ids + [idx], session)
105
106 # Now, after update.update_bugs, bug with idx should exists in DB
107 bug = Bug.get(idx)
108
109 bugs.append(bug)
110
111 elif action == "testing":
112 bugs = update.bugs
113 else:
114 raise NotImplementedError("Should never get here.")
115
116 self.work_on_bugs(session, update, bugs)
117 self.fetch_test_cases(session, update)
118
119 if config['test_gating.required']:
120 with self.db_factory() as session:
121 update = Update.get(alias)
122 update.update_test_gating_status()
123
124 log.info("Updates Handler done with %s, %s" % (alias, action))
125
126 def fetch_test_cases(self, session, update):
127 """
128 Query the wiki for test cases for each package on the given update.
129
130 Args:
131 session (sqlalchemy.orm.session.Session): A database session.
132 update (bodhi.server.models.Update): The update's builds are iterated upon to find test
133 cases for their associated Packages..
134 """
135 for build in update.builds:
136 try:
137 build.package.fetch_test_cases(session)
138 except BodhiException:
139 log.warning('Error occurred during fetching testcases', exc_info=True)
140
141 def work_on_bugs(self, session, update, bugs):
142 """
143 Iterate the list of bugs, retrieving information from Bugzilla and modifying them.
144
145 Iterate the given list of bugs associated with the given update. For each bug, retrieve
146 details from Bugzilla, comment on the bug to let watchers know about the update, and mark
147 the bug as MODIFIED. If the bug is a security issue, mark the update as a security update.
148
149 If handle_bugs is not True, return and do nothing.
150
151 Args:
152 session (sqlalchemy.orm.session.Session): A database session.
153 update (bodhi.server.models.Update): The update that the bugs are associated with.
154 bugs (list): A list of bodhi.server.models.Bug instances that we wish to act on.
155 """
156 if not self.handle_bugs:
157 log.warning("Not configured to handle bugs")
158 return
159
160 log.info("Got %i bugs to sync for %r" % (len(bugs), update.alias))
161 for bug in bugs:
162 log.info("Getting RHBZ bug %r" % bug.bug_id)
163 try:
164 rhbz_bug = bug_module.bugtracker.getbug(bug.bug_id)
165
166 log.info("Updating our details for %r" % bug.bug_id)
167 bug.update_details(rhbz_bug)
168 log.info(" Got title %r for %r" % (bug.title, bug.bug_id))
169
170 # If you set the type of your update to 'enhancement' but you
171 # attach a security bug, we automatically change the type of your
172 # update to 'security'. We need to do this first, so we don't
173 # accidentally comment on stuff that we shouldn't.
174 if bug.security:
175 log.info("Setting our UpdateType to security.")
176 update.type = UpdateType.security
177
178 log.info("Commenting on %r" % bug.bug_id)
179 comment = config['initial_bug_msg'] % (
180 update.alias, update.release.long_name, update.abs_url())
181
182 log.info("Modifying %r" % bug.bug_id)
183 bug.modified(update, comment)
184 except Exception:
185 log.warning('Error occurred during updating single bug', exc_info=True)
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/bodhi/server/tasks/updates.py b/bodhi/server/tasks/updates.py
--- a/bodhi/server/tasks/updates.py
+++ b/bodhi/server/tasks/updates.py
@@ -36,10 +36,12 @@
import logging
import time
+from sqlalchemy import func
+
from bodhi.server import util, bugs as bug_module
from bodhi.server.config import config
from bodhi.server.exceptions import BodhiException
-from bodhi.server.models import Bug, Update, UpdateType
+from bodhi.server.models import Bug, Update, UpdateType, UpdateStatus
log = logging.getLogger('bodhi')
@@ -95,6 +97,19 @@
bugs = []
if action == "edit":
+ # If editing a Pending update, all of whose builds are signed, for a release
+ # which isn't composed by Bodhi (i.e. Rawhide), move it directly to Testing.
+ if not update.release.composed_by_bodhi \
+ and update.status == UpdateStatus.pending \
+ and update.signed:
+ log.info("Every build in the update is signed, set status to testing")
+
+ update.status = UpdateStatus.testing
+ update.date_testing = func.current_timestamp()
+ update.request = None
+
+ log.info(f"Update status of {update.display_name} has been set to testing")
+
for idx in data['new_bugs']:
bug = Bug.get(idx)
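
For quick reading, the core of the patch is the new early-exit branch it adds to `UpdatesHandler.run()` for the "edit" action. Below is a condensed sketch of that guard as a hypothetical helper — the `promote_edited_update` name and the standalone-function form are illustrative only; in the real diff the logic is inlined in `run()` as shown above.

```python
from sqlalchemy import func

from bodhi.server.models import UpdateStatus


def promote_edited_update(update):
    # If a Pending update for a release not composed by Bodhi (e.g. Rawhide)
    # is edited while all of its builds are already signed, push it straight
    # back to Testing instead of leaving it stuck in Pending.
    if (not update.release.composed_by_bodhi
            and update.status == UpdateStatus.pending
            and update.signed):
        update.status = UpdateStatus.testing
        update.date_testing = func.current_timestamp()
        update.request = None
```

This covers the "move the update back to testing when it is edited" half of the issue; the reset-to-pending behaviour mentioned in its first sentence is not part of this diff.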
| {"golden_diff": "diff --git a/bodhi/server/tasks/updates.py b/bodhi/server/tasks/updates.py\n--- a/bodhi/server/tasks/updates.py\n+++ b/bodhi/server/tasks/updates.py\n@@ -36,10 +36,12 @@\n import logging\n import time\n \n+from sqlalchemy import func\n+\n from bodhi.server import util, bugs as bug_module\n from bodhi.server.config import config\n from bodhi.server.exceptions import BodhiException\n-from bodhi.server.models import Bug, Update, UpdateType\n+from bodhi.server.models import Bug, Update, UpdateType, UpdateStatus\n \n \n log = logging.getLogger('bodhi')\n@@ -95,6 +97,19 @@\n \n bugs = []\n if action == \"edit\":\n+ # If editing a Pending update, all of whose builds are signed, for a release\n+ # which isn't composed by Bodhi (i.e. Rawhide), move it directly to Testing.\n+ if not update.release.composed_by_bodhi \\\n+ and update.status == UpdateStatus.pending \\\n+ and update.signed:\n+ log.info(\"Every build in the update is signed, set status to testing\")\n+\n+ update.status = UpdateStatus.testing\n+ update.date_testing = func.current_timestamp()\n+ update.request = None\n+\n+ log.info(f\"Update status of {update.display_name} has been set to testing\")\n+\n for idx in data['new_bugs']:\n bug = Bug.get(idx)\n", "issue": "Reset multi builds update that failed merging a side tag to pending\nIf a multi builds update fails to be merge in stable, the approve testing cron job will add a new comment to the update every times it run.\r\nTo avoid this we need to reset the update status to pending, but we also need to be able to move back the update to testing when the update is edited (build added or build removed).\n", "before_files": [{"content": "# Copyright 2015-2019 Red Hat Inc., and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe \"updates handler\".\n\nThis module is responsible for doing value-added work \"offline\" that used to be\ndone when updates were submitted. 
Specifically, when someone submits an update\nwe used to:\n\n- Update any bugs in bugzilla associated with the update.\n- Check for test cases in the wiki.\n\nThose things could sometimes take a *very* long time, especially if there were\nlots of builds and lots of bugs in the update.\n\nNow, update-submission breezes by those steps and simply tells the user \"OK\".\nA message gets published when their update goes through, and *that* message\ngets received here and triggers us to do all that network-laden heavy lifting.\n\"\"\"\n\nimport logging\nimport time\n\nfrom bodhi.server import util, bugs as bug_module\nfrom bodhi.server.config import config\nfrom bodhi.server.exceptions import BodhiException\nfrom bodhi.server.models import Bug, Update, UpdateType\n\n\nlog = logging.getLogger('bodhi')\n\n\nclass UpdatesHandler:\n \"\"\"\n Perform background tasks when updates are created or edited.\n\n This Celery task is run when an update is created or editied in the frontend,\n and performs background tasks such as modifying Bugzilla issues (and loading information from\n Bugzilla so we can display it to the user) and looking up wiki test cases.\n\n Attributes:\n db_factory (bodhi.server.util.TransactionalSessionMaker): A context manager that yields a\n database session.\n handle_bugs (bool): If True, interact with Bugzilla. Else do not.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the UpdatesHandler.\"\"\"\n self.db_factory = util.transactional_session_maker()\n\n self.handle_bugs = bool(config.get('bodhi_email'))\n if not self.handle_bugs:\n log.warning(\"No bodhi_email defined; not fetching bug details\")\n\n def run(self, api_version: int, data: dict):\n \"\"\"\n Process the given message, updating relevant bugs and test cases.\n\n Duplicate messages: if the server delivers the message multiple times,\n the bugs and test cases are simply re-fetched and updated, so nothing\n bad happens.\n\n Args:\n api_version: API version number.\n data: Information about a new or edited update.\n \"\"\"\n action = data[\"action\"]\n alias = data['update'].get('alias')\n\n log.info(\"Updates Handler handling %s, %s\" % (alias, action))\n\n # Go to sleep for a second to try and avoid a race condition\n # https://github.com/fedora-infra/bodhi/issues/458\n time.sleep(1)\n\n with self.db_factory() as session:\n update = Update.get(alias)\n if not update:\n raise BodhiException(\"Couldn't find alias '%s' in DB\" % alias)\n\n bugs = []\n if action == \"edit\":\n for idx in data['new_bugs']:\n bug = Bug.get(idx)\n\n # Sanity check\n if bug is None or bug not in update.bugs:\n update_bugs_ids = [b.bug_id for b in update.bugs]\n update.update_bugs(update_bugs_ids + [idx], session)\n\n # Now, after update.update_bugs, bug with idx should exists in DB\n bug = Bug.get(idx)\n\n bugs.append(bug)\n\n elif action == \"testing\":\n bugs = update.bugs\n else:\n raise NotImplementedError(\"Should never get here.\")\n\n self.work_on_bugs(session, update, bugs)\n self.fetch_test_cases(session, update)\n\n if config['test_gating.required']:\n with self.db_factory() as session:\n update = Update.get(alias)\n update.update_test_gating_status()\n\n log.info(\"Updates Handler done with %s, %s\" % (alias, action))\n\n def fetch_test_cases(self, session, update):\n \"\"\"\n Query the wiki for test cases for each package on the given update.\n\n Args:\n session (sqlalchemy.orm.session.Session): A database session.\n update (bodhi.server.models.Update): The update's builds are iterated upon to find test\n cases 
for their associated Packages..\n \"\"\"\n for build in update.builds:\n try:\n build.package.fetch_test_cases(session)\n except BodhiException:\n log.warning('Error occurred during fetching testcases', exc_info=True)\n\n def work_on_bugs(self, session, update, bugs):\n \"\"\"\n Iterate the list of bugs, retrieving information from Bugzilla and modifying them.\n\n Iterate the given list of bugs associated with the given update. For each bug, retrieve\n details from Bugzilla, comment on the bug to let watchers know about the update, and mark\n the bug as MODIFIED. If the bug is a security issue, mark the update as a security update.\n\n If handle_bugs is not True, return and do nothing.\n\n Args:\n session (sqlalchemy.orm.session.Session): A database session.\n update (bodhi.server.models.Update): The update that the bugs are associated with.\n bugs (list): A list of bodhi.server.models.Bug instances that we wish to act on.\n \"\"\"\n if not self.handle_bugs:\n log.warning(\"Not configured to handle bugs\")\n return\n\n log.info(\"Got %i bugs to sync for %r\" % (len(bugs), update.alias))\n for bug in bugs:\n log.info(\"Getting RHBZ bug %r\" % bug.bug_id)\n try:\n rhbz_bug = bug_module.bugtracker.getbug(bug.bug_id)\n\n log.info(\"Updating our details for %r\" % bug.bug_id)\n bug.update_details(rhbz_bug)\n log.info(\" Got title %r for %r\" % (bug.title, bug.bug_id))\n\n # If you set the type of your update to 'enhancement' but you\n # attach a security bug, we automatically change the type of your\n # update to 'security'. We need to do this first, so we don't\n # accidentally comment on stuff that we shouldn't.\n if bug.security:\n log.info(\"Setting our UpdateType to security.\")\n update.type = UpdateType.security\n\n log.info(\"Commenting on %r\" % bug.bug_id)\n comment = config['initial_bug_msg'] % (\n update.alias, update.release.long_name, update.abs_url())\n\n log.info(\"Modifying %r\" % bug.bug_id)\n bug.modified(update, comment)\n except Exception:\n log.warning('Error occurred during updating single bug', exc_info=True)\n", "path": "bodhi/server/tasks/updates.py"}], "after_files": [{"content": "# Copyright 2015-2019 Red Hat Inc., and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe \"updates handler\".\n\nThis module is responsible for doing value-added work \"offline\" that used to be\ndone when updates were submitted. 
Specifically, when someone submits an update\nwe used to:\n\n- Update any bugs in bugzilla associated with the update.\n- Check for test cases in the wiki.\n\nThose things could sometimes take a *very* long time, especially if there were\nlots of builds and lots of bugs in the update.\n\nNow, update-submission breezes by those steps and simply tells the user \"OK\".\nA message gets published when their update goes through, and *that* message\ngets received here and triggers us to do all that network-laden heavy lifting.\n\"\"\"\n\nimport logging\nimport time\n\nfrom sqlalchemy import func\n\nfrom bodhi.server import util, bugs as bug_module\nfrom bodhi.server.config import config\nfrom bodhi.server.exceptions import BodhiException\nfrom bodhi.server.models import Bug, Update, UpdateType, UpdateStatus\n\n\nlog = logging.getLogger('bodhi')\n\n\nclass UpdatesHandler:\n \"\"\"\n Perform background tasks when updates are created or edited.\n\n This Celery task is run when an update is created or editied in the frontend,\n and performs background tasks such as modifying Bugzilla issues (and loading information from\n Bugzilla so we can display it to the user) and looking up wiki test cases.\n\n Attributes:\n db_factory (bodhi.server.util.TransactionalSessionMaker): A context manager that yields a\n database session.\n handle_bugs (bool): If True, interact with Bugzilla. Else do not.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Initialize the UpdatesHandler.\"\"\"\n self.db_factory = util.transactional_session_maker()\n\n self.handle_bugs = bool(config.get('bodhi_email'))\n if not self.handle_bugs:\n log.warning(\"No bodhi_email defined; not fetching bug details\")\n\n def run(self, api_version: int, data: dict):\n \"\"\"\n Process the given message, updating relevant bugs and test cases.\n\n Duplicate messages: if the server delivers the message multiple times,\n the bugs and test cases are simply re-fetched and updated, so nothing\n bad happens.\n\n Args:\n api_version: API version number.\n data: Information about a new or edited update.\n \"\"\"\n action = data[\"action\"]\n alias = data['update'].get('alias')\n\n log.info(\"Updates Handler handling %s, %s\" % (alias, action))\n\n # Go to sleep for a second to try and avoid a race condition\n # https://github.com/fedora-infra/bodhi/issues/458\n time.sleep(1)\n\n with self.db_factory() as session:\n update = Update.get(alias)\n if not update:\n raise BodhiException(\"Couldn't find alias '%s' in DB\" % alias)\n\n bugs = []\n if action == \"edit\":\n # If editing a Pending update, all of whose builds are signed, for a release\n # which isn't composed by Bodhi (i.e. 
Rawhide), move it directly to Testing.\n if not update.release.composed_by_bodhi \\\n and update.status == UpdateStatus.pending \\\n and update.signed:\n log.info(\"Every build in the update is signed, set status to testing\")\n\n update.status = UpdateStatus.testing\n update.date_testing = func.current_timestamp()\n update.request = None\n\n log.info(f\"Update status of {update.display_name} has been set to testing\")\n\n for idx in data['new_bugs']:\n bug = Bug.get(idx)\n\n # Sanity check\n if bug is None or bug not in update.bugs:\n update_bugs_ids = [b.bug_id for b in update.bugs]\n update.update_bugs(update_bugs_ids + [idx], session)\n\n # Now, after update.update_bugs, bug with idx should exists in DB\n bug = Bug.get(idx)\n\n bugs.append(bug)\n\n elif action == \"testing\":\n bugs = update.bugs\n else:\n raise NotImplementedError(\"Should never get here.\")\n\n self.work_on_bugs(session, update, bugs)\n self.fetch_test_cases(session, update)\n\n if config['test_gating.required']:\n with self.db_factory() as session:\n update = Update.get(alias)\n update.update_test_gating_status()\n\n log.info(\"Updates Handler done with %s, %s\" % (alias, action))\n\n def fetch_test_cases(self, session, update):\n \"\"\"\n Query the wiki for test cases for each package on the given update.\n\n Args:\n session (sqlalchemy.orm.session.Session): A database session.\n update (bodhi.server.models.Update): The update's builds are iterated upon to find test\n cases for their associated Packages..\n \"\"\"\n for build in update.builds:\n try:\n build.package.fetch_test_cases(session)\n except BodhiException:\n log.warning('Error occurred during fetching testcases', exc_info=True)\n\n def work_on_bugs(self, session, update, bugs):\n \"\"\"\n Iterate the list of bugs, retrieving information from Bugzilla and modifying them.\n\n Iterate the given list of bugs associated with the given update. For each bug, retrieve\n details from Bugzilla, comment on the bug to let watchers know about the update, and mark\n the bug as MODIFIED. If the bug is a security issue, mark the update as a security update.\n\n If handle_bugs is not True, return and do nothing.\n\n Args:\n session (sqlalchemy.orm.session.Session): A database session.\n update (bodhi.server.models.Update): The update that the bugs are associated with.\n bugs (list): A list of bodhi.server.models.Bug instances that we wish to act on.\n \"\"\"\n if not self.handle_bugs:\n log.warning(\"Not configured to handle bugs\")\n return\n\n log.info(\"Got %i bugs to sync for %r\" % (len(bugs), update.alias))\n for bug in bugs:\n log.info(\"Getting RHBZ bug %r\" % bug.bug_id)\n try:\n rhbz_bug = bug_module.bugtracker.getbug(bug.bug_id)\n\n log.info(\"Updating our details for %r\" % bug.bug_id)\n bug.update_details(rhbz_bug)\n log.info(\" Got title %r for %r\" % (bug.title, bug.bug_id))\n\n # If you set the type of your update to 'enhancement' but you\n # attach a security bug, we automatically change the type of your\n # update to 'security'. 
We need to do this first, so we don't\n # accidentally comment on stuff that we shouldn't.\n if bug.security:\n log.info(\"Setting our UpdateType to security.\")\n update.type = UpdateType.security\n\n log.info(\"Commenting on %r\" % bug.bug_id)\n comment = config['initial_bug_msg'] % (\n update.alias, update.release.long_name, update.abs_url())\n\n log.info(\"Modifying %r\" % bug.bug_id)\n bug.modified(update, comment)\n except Exception:\n log.warning('Error occurred during updating single bug', exc_info=True)\n", "path": "bodhi/server/tasks/updates.py"}]} | 2,426 | 323 |
gh_patches_debug_35730 | rasdani/github-patches | git_diff | readthedocs__readthedocs.org-5086 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Custom robots.txt support?
We've talked about blowing away the protected designation, so I'm not sure it makes sense to put a special case on the protected privacy level, but maybe we need a separate option for docs that shouldn't be crawled?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `readthedocs/core/urls/subdomain.py`
Content:
```
1 """URL configurations for subdomains."""
2 from __future__ import absolute_import
3
4 from functools import reduce
5 from operator import add
6
7 from django.conf.urls import url
8 from django.conf import settings
9 from django.conf.urls.static import static
10
11 from readthedocs.core.views.serve import (
12 redirect_page_with_filename,
13 redirect_project_slug, serve_docs
14 )
15 from readthedocs.core.views import (
16 server_error_500,
17 server_error_404,
18 )
19 from readthedocs.constants import pattern_opts
20
21 handler500 = server_error_500
22 handler404 = server_error_404
23
24 subdomain_urls = [
25 url(r'^(?:|projects/(?P<subproject_slug>{project_slug})/)'
26 r'page/(?P<filename>.*)$'.format(**pattern_opts),
27 redirect_page_with_filename,
28 name='docs_detail'),
29
30 url((r'^(?:|projects/(?P<subproject_slug>{project_slug})/)$').format(**pattern_opts),
31 redirect_project_slug,
32 name='redirect_project_slug'),
33
34 url((r'^(?:|projects/(?P<subproject_slug>{project_slug})/)'
35 r'(?P<lang_slug>{lang_slug})/'
36 r'(?P<version_slug>{version_slug})/'
37 r'(?P<filename>{filename_slug})$'.format(**pattern_opts)),
38 serve_docs,
39 name='docs_detail'),
40 ]
41
42 groups = [subdomain_urls]
43
44 # Needed to serve media locally
45 if getattr(settings, 'DEBUG', False):
46 groups.insert(0, static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT))
47
48 urlpatterns = reduce(add, groups)
49
```
Path: `readthedocs/core/views/serve.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 Doc serving from Python.
4
5 In production there are two modes,
6 * Serving from public symlinks in nginx (readthedocs.org & readthedocs.com)
7 * Serving from private symlinks in Python (readthedocs.com only)
8
9 In development, we have two modes:
10 * Serving from public symlinks in Python
11 * Serving from private symlinks in Python
12
13 This means we should only serve from public symlinks in dev,
14 and generally default to serving from private symlinks in Python only.
15
16 Privacy
17 -------
18
19 These views will take into account the version privacy level.
20
21 Settings
22 --------
23
24 PYTHON_MEDIA (False) - Set this to True to serve docs & media from Python
25 SERVE_DOCS (['private']) - The list of ['private', 'public'] docs to serve.
26 """
27
28 from __future__ import (
29 absolute_import, division, print_function, unicode_literals)
30
31 import logging
32 import mimetypes
33 import os
34 from functools import wraps
35
36 from django.conf import settings
37 from django.http import HttpResponse, HttpResponseRedirect, Http404
38 from django.shortcuts import get_object_or_404
39 from django.shortcuts import render
40 from django.utils.encoding import iri_to_uri
41 from django.views.static import serve
42
43 from readthedocs.builds.models import Version
44 from readthedocs.core.permissions import AdminPermission
45 from readthedocs.core.resolver import resolve, resolve_path
46 from readthedocs.core.symlink import PrivateSymlink, PublicSymlink
47 from readthedocs.projects import constants
48 from readthedocs.projects.models import Project, ProjectRelationship
49
50 log = logging.getLogger(__name__)
51
52
53 def map_subproject_slug(view_func):
54 """
55 A decorator that maps a ``subproject_slug`` URL param into a Project.
56
57 :raises: Http404 if the Project doesn't exist
58
59 .. warning:: Does not take into account any kind of privacy settings.
60 """
61 @wraps(view_func)
62 def inner_view(request, subproject=None, subproject_slug=None, *args, **kwargs): # noqa
63 if subproject is None and subproject_slug:
64 # Try to fetch by subproject alias first, otherwise we might end up
65 # redirected to an unrelated project.
66 try:
67 # Depends on a project passed into kwargs
68 rel = ProjectRelationship.objects.get(
69 parent=kwargs['project'],
70 alias=subproject_slug,
71 )
72 subproject = rel.child
73 except (ProjectRelationship.DoesNotExist, KeyError):
74 subproject = get_object_or_404(Project, slug=subproject_slug)
75 return view_func(request, subproject=subproject, *args, **kwargs)
76
77 return inner_view
78
79
80 def map_project_slug(view_func):
81 """
82 A decorator that maps a ``project_slug`` URL param into a Project.
83
84 :raises: Http404 if the Project doesn't exist
85
86 .. warning:: Does not take into account any kind of privacy settings.
87 """
88 @wraps(view_func)
89 def inner_view(request, project=None, project_slug=None, *args, **kwargs): # noqa
90 if project is None:
91 if not project_slug:
92 project_slug = request.slug
93 try:
94 project = Project.objects.get(slug=project_slug)
95 except Project.DoesNotExist:
96 raise Http404('Project does not exist.')
97 return view_func(request, project=project, *args, **kwargs)
98
99 return inner_view
100
101
102 @map_project_slug
103 @map_subproject_slug
104 def redirect_project_slug(request, project, subproject): # pylint: disable=unused-argument
105 """Handle / -> /en/latest/ directs on subdomains."""
106 return HttpResponseRedirect(resolve(subproject or project))
107
108
109 @map_project_slug
110 @map_subproject_slug
111 def redirect_page_with_filename(request, project, subproject, filename): # pylint: disable=unused-argument # noqa
112 """Redirect /page/file.html to /en/latest/file.html."""
113 return HttpResponseRedirect(
114 resolve(subproject or project, filename=filename))
115
116
117 def _serve_401(request, project):
118 res = render(request, '401.html')
119 res.status_code = 401
120 log.debug('Unauthorized access to {0} documentation'.format(project.slug))
121 return res
122
123
124 def _serve_file(request, filename, basepath):
125 # Serve the file from the proper location
126 if settings.DEBUG or getattr(settings, 'PYTHON_MEDIA', False):
127 # Serve from Python
128 return serve(request, filename, basepath)
129
130 # Serve from Nginx
131 content_type, encoding = mimetypes.guess_type(
132 os.path.join(basepath, filename))
133 content_type = content_type or 'application/octet-stream'
134 response = HttpResponse(content_type=content_type)
135 if encoding:
136 response['Content-Encoding'] = encoding
137 try:
138 iri_path = os.path.join(
139 basepath[len(settings.SITE_ROOT):],
140 filename,
141 )
142 # NGINX does not support non-ASCII characters in the header, so we
143 # convert the IRI path to URI so it's compatible with what NGINX expects
144 # as the header value.
145 # https://github.com/benoitc/gunicorn/issues/1448
146 # https://docs.djangoproject.com/en/1.11/ref/unicode/#uri-and-iri-handling
147 x_accel_redirect = iri_to_uri(iri_path)
148 response['X-Accel-Redirect'] = x_accel_redirect
149 except UnicodeEncodeError:
150 raise Http404
151
152 return response
153
154
155 @map_project_slug
156 @map_subproject_slug
157 def serve_docs(
158 request, project, subproject, lang_slug=None, version_slug=None,
159 filename=''):
160 """Exists to map existing proj, lang, version, filename views to the file format."""
161 if not version_slug:
162 version_slug = project.get_default_version()
163 try:
164 version = project.versions.public(request.user).get(slug=version_slug)
165 except Version.DoesNotExist:
166 # Properly raise a 404 if the version doesn't exist (or is inactive) and
167 # a 401 if it does
168 if project.versions.filter(slug=version_slug, active=True).exists():
169 return _serve_401(request, project)
170 raise Http404('Version does not exist.')
171 filename = resolve_path(
172 subproject or project, # Resolve the subproject if it exists
173 version_slug=version_slug,
174 language=lang_slug,
175 filename=filename,
176 subdomain=True, # subdomain will make it a "full" path without a URL prefix
177 )
178 if (version.privacy_level == constants.PRIVATE and
179 not AdminPermission.is_member(user=request.user, obj=project)):
180 return _serve_401(request, project)
181 return _serve_symlink_docs(
182 request,
183 filename=filename,
184 project=project,
185 privacy_level=version.privacy_level,
186 )
187
188
189 @map_project_slug
190 def _serve_symlink_docs(request, project, privacy_level, filename=''):
191 """Serve a file by symlink, or a 404 if not found."""
192 # Handle indexes
193 if filename == '' or filename[-1] == '/':
194 filename += 'index.html'
195
196 # This breaks path joining, by ignoring the root when given an "absolute" path
197 if filename[0] == '/':
198 filename = filename[1:]
199
200 log.info('Serving %s for %s', filename, project)
201
202 files_tried = []
203
204 serve_docs = getattr(settings, 'SERVE_DOCS', [constants.PRIVATE])
205
206 if (settings.DEBUG or constants.PUBLIC in serve_docs) and privacy_level != constants.PRIVATE: # yapf: disable # noqa
207 public_symlink = PublicSymlink(project)
208 basepath = public_symlink.project_root
209 if os.path.exists(os.path.join(basepath, filename)):
210 return _serve_file(request, filename, basepath)
211
212 files_tried.append(os.path.join(basepath, filename))
213
214 if (settings.DEBUG or constants.PRIVATE in serve_docs) and privacy_level == constants.PRIVATE: # yapf: disable # noqa
215 # Handle private
216 private_symlink = PrivateSymlink(project)
217 basepath = private_symlink.project_root
218
219 if os.path.exists(os.path.join(basepath, filename)):
220 return _serve_file(request, filename, basepath)
221
222 files_tried.append(os.path.join(basepath, filename))
223
224 raise Http404(
225 'File not found. Tried these files: %s' % ','.join(files_tried))
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/readthedocs/core/urls/subdomain.py b/readthedocs/core/urls/subdomain.py
--- a/readthedocs/core/urls/subdomain.py
+++ b/readthedocs/core/urls/subdomain.py
@@ -1,3 +1,5 @@
+# -*- coding: utf-8 -*-
+
"""URL configurations for subdomains."""
from __future__ import absolute_import
@@ -10,7 +12,7 @@
from readthedocs.core.views.serve import (
redirect_page_with_filename,
- redirect_project_slug, serve_docs
+ redirect_project_slug, serve_docs, robots_txt,
)
from readthedocs.core.views import (
server_error_500,
@@ -22,6 +24,8 @@
handler404 = server_error_404
subdomain_urls = [
+ url(r'robots.txt$', robots_txt, name='robots_txt'),
+
url(r'^(?:|projects/(?P<subproject_slug>{project_slug})/)'
r'page/(?P<filename>.*)$'.format(**pattern_opts),
redirect_page_with_filename,
diff --git a/readthedocs/core/views/serve.py b/readthedocs/core/views/serve.py
--- a/readthedocs/core/views/serve.py
+++ b/readthedocs/core/views/serve.py
@@ -223,3 +223,49 @@
raise Http404(
'File not found. Tried these files: %s' % ','.join(files_tried))
+
+
+@map_project_slug
+def robots_txt(request, project):
+ """
+ Serve custom user's defined ``/robots.txt``.
+
+ If the user added a ``robots.txt`` in the "default version" of the project,
+ we serve it directly.
+ """
+ # Use the ``robots.txt`` file from the default version configured
+ version_slug = project.get_default_version()
+ version = project.versions.get(slug=version_slug)
+
+ no_serve_robots_txt = any([
+ # If project is private or,
+ project.privacy_level == constants.PRIVATE,
+ # default version is private or,
+ version.privacy_level == constants.PRIVATE,
+ # default version is not active or,
+ not version.active,
+ # default version is not built
+ not version.built,
+ ])
+ if no_serve_robots_txt:
+ # ... we do return a 404
+ raise Http404()
+
+ filename = resolve_path(
+ project,
+ version_slug=version_slug,
+ filename='robots.txt',
+ subdomain=True, # subdomain will make it a "full" path without a URL prefix
+ )
+
+ # This breaks path joining, by ignoring the root when given an "absolute" path
+ if filename[0] == '/':
+ filename = filename[1:]
+
+ basepath = PublicSymlink(project).project_root
+ fullpath = os.path.join(basepath, filename)
+
+ if os.path.exists(fullpath):
+ return HttpResponse(open(fullpath).read(), content_type='text/plain')
+
+ return HttpResponse('User-agent: *\nAllow: /\n', content_type='text/plain')
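
A usage note, not part of the patch: for the new `robots_txt` view to find a user-supplied file, `robots.txt` has to land at the root of the built documentation for the project's default version. Assuming a Sphinx-based project with the file kept next to `conf.py`, Sphinx's `html_extra_path` option copies it there:

```python
# conf.py — copy robots.txt (kept beside conf.py) into the root of the
# generated HTML so the view added above can serve it at /robots.txt.
html_extra_path = ["robots.txt"]
```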
| {"golden_diff": "diff --git a/readthedocs/core/urls/subdomain.py b/readthedocs/core/urls/subdomain.py\n--- a/readthedocs/core/urls/subdomain.py\n+++ b/readthedocs/core/urls/subdomain.py\n@@ -1,3 +1,5 @@\n+# -*- coding: utf-8 -*-\n+\n \"\"\"URL configurations for subdomains.\"\"\"\n from __future__ import absolute_import\n \n@@ -10,7 +12,7 @@\n \n from readthedocs.core.views.serve import (\n redirect_page_with_filename,\n- redirect_project_slug, serve_docs\n+ redirect_project_slug, serve_docs, robots_txt,\n )\n from readthedocs.core.views import (\n server_error_500,\n@@ -22,6 +24,8 @@\n handler404 = server_error_404\n \n subdomain_urls = [\n+ url(r'robots.txt$', robots_txt, name='robots_txt'),\n+\n url(r'^(?:|projects/(?P<subproject_slug>{project_slug})/)'\n r'page/(?P<filename>.*)$'.format(**pattern_opts),\n redirect_page_with_filename,\ndiff --git a/readthedocs/core/views/serve.py b/readthedocs/core/views/serve.py\n--- a/readthedocs/core/views/serve.py\n+++ b/readthedocs/core/views/serve.py\n@@ -223,3 +223,49 @@\n \n raise Http404(\n 'File not found. Tried these files: %s' % ','.join(files_tried))\n+\n+\n+@map_project_slug\n+def robots_txt(request, project):\n+ \"\"\"\n+ Serve custom user's defined ``/robots.txt``.\n+\n+ If the user added a ``robots.txt`` in the \"default version\" of the project,\n+ we serve it directly.\n+ \"\"\"\n+ # Use the ``robots.txt`` file from the default version configured\n+ version_slug = project.get_default_version()\n+ version = project.versions.get(slug=version_slug)\n+\n+ no_serve_robots_txt = any([\n+ # If project is private or,\n+ project.privacy_level == constants.PRIVATE,\n+ # default version is private or,\n+ version.privacy_level == constants.PRIVATE,\n+ # default version is not active or,\n+ not version.active,\n+ # default version is not built\n+ not version.built,\n+ ])\n+ if no_serve_robots_txt:\n+ # ... 
we do return a 404\n+ raise Http404()\n+\n+ filename = resolve_path(\n+ project,\n+ version_slug=version_slug,\n+ filename='robots.txt',\n+ subdomain=True, # subdomain will make it a \"full\" path without a URL prefix\n+ )\n+\n+ # This breaks path joining, by ignoring the root when given an \"absolute\" path\n+ if filename[0] == '/':\n+ filename = filename[1:]\n+\n+ basepath = PublicSymlink(project).project_root\n+ fullpath = os.path.join(basepath, filename)\n+\n+ if os.path.exists(fullpath):\n+ return HttpResponse(open(fullpath).read(), content_type='text/plain')\n+\n+ return HttpResponse('User-agent: *\\nAllow: /\\n', content_type='text/plain')\n", "issue": "Custom robots.txt support?\nWe've talked about blowing away the protected designation, so not sure if it makes sense to put special case on the protected privacy level, but maybe a separate option for docs that shouldn't be crawled?\n", "before_files": [{"content": "\"\"\"URL configurations for subdomains.\"\"\"\nfrom __future__ import absolute_import\n\nfrom functools import reduce\nfrom operator import add\n\nfrom django.conf.urls import url\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom readthedocs.core.views.serve import (\n redirect_page_with_filename,\n redirect_project_slug, serve_docs\n)\nfrom readthedocs.core.views import (\n server_error_500,\n server_error_404,\n)\nfrom readthedocs.constants import pattern_opts\n\nhandler500 = server_error_500\nhandler404 = server_error_404\n\nsubdomain_urls = [\n url(r'^(?:|projects/(?P<subproject_slug>{project_slug})/)'\n r'page/(?P<filename>.*)$'.format(**pattern_opts),\n redirect_page_with_filename,\n name='docs_detail'),\n\n url((r'^(?:|projects/(?P<subproject_slug>{project_slug})/)$').format(**pattern_opts),\n redirect_project_slug,\n name='redirect_project_slug'),\n\n url((r'^(?:|projects/(?P<subproject_slug>{project_slug})/)'\n r'(?P<lang_slug>{lang_slug})/'\n r'(?P<version_slug>{version_slug})/'\n r'(?P<filename>{filename_slug})$'.format(**pattern_opts)),\n serve_docs,\n name='docs_detail'),\n]\n\ngroups = [subdomain_urls]\n\n# Needed to serve media locally\nif getattr(settings, 'DEBUG', False):\n groups.insert(0, static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT))\n\nurlpatterns = reduce(add, groups)\n", "path": "readthedocs/core/urls/subdomain.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDoc serving from Python.\n\nIn production there are two modes,\n* Serving from public symlinks in nginx (readthedocs.org & readthedocs.com)\n* Serving from private symlinks in Python (readthedocs.com only)\n\nIn development, we have two modes:\n* Serving from public symlinks in Python\n* Serving from private symlinks in Python\n\nThis means we should only serve from public symlinks in dev,\nand generally default to serving from private symlinks in Python only.\n\nPrivacy\n-------\n\nThese views will take into account the version privacy level.\n\nSettings\n--------\n\nPYTHON_MEDIA (False) - Set this to True to serve docs & media from Python\nSERVE_DOCS (['private']) - The list of ['private', 'public'] docs to serve.\n\"\"\"\n\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nimport logging\nimport mimetypes\nimport os\nfrom functools import wraps\n\nfrom django.conf import settings\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import render\nfrom django.utils.encoding import iri_to_uri\nfrom 
django.views.static import serve\n\nfrom readthedocs.builds.models import Version\nfrom readthedocs.core.permissions import AdminPermission\nfrom readthedocs.core.resolver import resolve, resolve_path\nfrom readthedocs.core.symlink import PrivateSymlink, PublicSymlink\nfrom readthedocs.projects import constants\nfrom readthedocs.projects.models import Project, ProjectRelationship\n\nlog = logging.getLogger(__name__)\n\n\ndef map_subproject_slug(view_func):\n \"\"\"\n A decorator that maps a ``subproject_slug`` URL param into a Project.\n\n :raises: Http404 if the Project doesn't exist\n\n .. warning:: Does not take into account any kind of privacy settings.\n \"\"\"\n @wraps(view_func)\n def inner_view(request, subproject=None, subproject_slug=None, *args, **kwargs): # noqa\n if subproject is None and subproject_slug:\n # Try to fetch by subproject alias first, otherwise we might end up\n # redirected to an unrelated project.\n try:\n # Depends on a project passed into kwargs\n rel = ProjectRelationship.objects.get(\n parent=kwargs['project'],\n alias=subproject_slug,\n )\n subproject = rel.child\n except (ProjectRelationship.DoesNotExist, KeyError):\n subproject = get_object_or_404(Project, slug=subproject_slug)\n return view_func(request, subproject=subproject, *args, **kwargs)\n\n return inner_view\n\n\ndef map_project_slug(view_func):\n \"\"\"\n A decorator that maps a ``project_slug`` URL param into a Project.\n\n :raises: Http404 if the Project doesn't exist\n\n .. warning:: Does not take into account any kind of privacy settings.\n \"\"\"\n @wraps(view_func)\n def inner_view(request, project=None, project_slug=None, *args, **kwargs): # noqa\n if project is None:\n if not project_slug:\n project_slug = request.slug\n try:\n project = Project.objects.get(slug=project_slug)\n except Project.DoesNotExist:\n raise Http404('Project does not exist.')\n return view_func(request, project=project, *args, **kwargs)\n\n return inner_view\n\n\n@map_project_slug\n@map_subproject_slug\ndef redirect_project_slug(request, project, subproject): # pylint: disable=unused-argument\n \"\"\"Handle / -> /en/latest/ directs on subdomains.\"\"\"\n return HttpResponseRedirect(resolve(subproject or project))\n\n\n@map_project_slug\n@map_subproject_slug\ndef redirect_page_with_filename(request, project, subproject, filename): # pylint: disable=unused-argument # noqa\n \"\"\"Redirect /page/file.html to /en/latest/file.html.\"\"\"\n return HttpResponseRedirect(\n resolve(subproject or project, filename=filename))\n\n\ndef _serve_401(request, project):\n res = render(request, '401.html')\n res.status_code = 401\n log.debug('Unauthorized access to {0} documentation'.format(project.slug))\n return res\n\n\ndef _serve_file(request, filename, basepath):\n # Serve the file from the proper location\n if settings.DEBUG or getattr(settings, 'PYTHON_MEDIA', False):\n # Serve from Python\n return serve(request, filename, basepath)\n\n # Serve from Nginx\n content_type, encoding = mimetypes.guess_type(\n os.path.join(basepath, filename))\n content_type = content_type or 'application/octet-stream'\n response = HttpResponse(content_type=content_type)\n if encoding:\n response['Content-Encoding'] = encoding\n try:\n iri_path = os.path.join(\n basepath[len(settings.SITE_ROOT):],\n filename,\n )\n # NGINX does not support non-ASCII characters in the header, so we\n # convert the IRI path to URI so it's compatible with what NGINX expects\n # as the header value.\n # https://github.com/benoitc/gunicorn/issues/1448\n # 
https://docs.djangoproject.com/en/1.11/ref/unicode/#uri-and-iri-handling\n x_accel_redirect = iri_to_uri(iri_path)\n response['X-Accel-Redirect'] = x_accel_redirect\n except UnicodeEncodeError:\n raise Http404\n\n return response\n\n\n@map_project_slug\n@map_subproject_slug\ndef serve_docs(\n request, project, subproject, lang_slug=None, version_slug=None,\n filename=''):\n \"\"\"Exists to map existing proj, lang, version, filename views to the file format.\"\"\"\n if not version_slug:\n version_slug = project.get_default_version()\n try:\n version = project.versions.public(request.user).get(slug=version_slug)\n except Version.DoesNotExist:\n # Properly raise a 404 if the version doesn't exist (or is inactive) and\n # a 401 if it does\n if project.versions.filter(slug=version_slug, active=True).exists():\n return _serve_401(request, project)\n raise Http404('Version does not exist.')\n filename = resolve_path(\n subproject or project, # Resolve the subproject if it exists\n version_slug=version_slug,\n language=lang_slug,\n filename=filename,\n subdomain=True, # subdomain will make it a \"full\" path without a URL prefix\n )\n if (version.privacy_level == constants.PRIVATE and\n not AdminPermission.is_member(user=request.user, obj=project)):\n return _serve_401(request, project)\n return _serve_symlink_docs(\n request,\n filename=filename,\n project=project,\n privacy_level=version.privacy_level,\n )\n\n\n@map_project_slug\ndef _serve_symlink_docs(request, project, privacy_level, filename=''):\n \"\"\"Serve a file by symlink, or a 404 if not found.\"\"\"\n # Handle indexes\n if filename == '' or filename[-1] == '/':\n filename += 'index.html'\n\n # This breaks path joining, by ignoring the root when given an \"absolute\" path\n if filename[0] == '/':\n filename = filename[1:]\n\n log.info('Serving %s for %s', filename, project)\n\n files_tried = []\n\n serve_docs = getattr(settings, 'SERVE_DOCS', [constants.PRIVATE])\n\n if (settings.DEBUG or constants.PUBLIC in serve_docs) and privacy_level != constants.PRIVATE: # yapf: disable # noqa\n public_symlink = PublicSymlink(project)\n basepath = public_symlink.project_root\n if os.path.exists(os.path.join(basepath, filename)):\n return _serve_file(request, filename, basepath)\n\n files_tried.append(os.path.join(basepath, filename))\n\n if (settings.DEBUG or constants.PRIVATE in serve_docs) and privacy_level == constants.PRIVATE: # yapf: disable # noqa\n # Handle private\n private_symlink = PrivateSymlink(project)\n basepath = private_symlink.project_root\n\n if os.path.exists(os.path.join(basepath, filename)):\n return _serve_file(request, filename, basepath)\n\n files_tried.append(os.path.join(basepath, filename))\n\n raise Http404(\n 'File not found. 
Tried these files: %s' % ','.join(files_tried))\n", "path": "readthedocs/core/views/serve.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"URL configurations for subdomains.\"\"\"\nfrom __future__ import absolute_import\n\nfrom functools import reduce\nfrom operator import add\n\nfrom django.conf.urls import url\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom readthedocs.core.views.serve import (\n redirect_page_with_filename,\n redirect_project_slug, serve_docs, robots_txt,\n)\nfrom readthedocs.core.views import (\n server_error_500,\n server_error_404,\n)\nfrom readthedocs.constants import pattern_opts\n\nhandler500 = server_error_500\nhandler404 = server_error_404\n\nsubdomain_urls = [\n url(r'robots.txt$', robots_txt, name='robots_txt'),\n\n url(r'^(?:|projects/(?P<subproject_slug>{project_slug})/)'\n r'page/(?P<filename>.*)$'.format(**pattern_opts),\n redirect_page_with_filename,\n name='docs_detail'),\n\n url((r'^(?:|projects/(?P<subproject_slug>{project_slug})/)$').format(**pattern_opts),\n redirect_project_slug,\n name='redirect_project_slug'),\n\n url((r'^(?:|projects/(?P<subproject_slug>{project_slug})/)'\n r'(?P<lang_slug>{lang_slug})/'\n r'(?P<version_slug>{version_slug})/'\n r'(?P<filename>{filename_slug})$'.format(**pattern_opts)),\n serve_docs,\n name='docs_detail'),\n]\n\ngroups = [subdomain_urls]\n\n# Needed to serve media locally\nif getattr(settings, 'DEBUG', False):\n groups.insert(0, static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT))\n\nurlpatterns = reduce(add, groups)\n", "path": "readthedocs/core/urls/subdomain.py"}, {"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDoc serving from Python.\n\nIn production there are two modes,\n* Serving from public symlinks in nginx (readthedocs.org & readthedocs.com)\n* Serving from private symlinks in Python (readthedocs.com only)\n\nIn development, we have two modes:\n* Serving from public symlinks in Python\n* Serving from private symlinks in Python\n\nThis means we should only serve from public symlinks in dev,\nand generally default to serving from private symlinks in Python only.\n\nPrivacy\n-------\n\nThese views will take into account the version privacy level.\n\nSettings\n--------\n\nPYTHON_MEDIA (False) - Set this to True to serve docs & media from Python\nSERVE_DOCS (['private']) - The list of ['private', 'public'] docs to serve.\n\"\"\"\n\nfrom __future__ import (\n absolute_import, division, print_function, unicode_literals)\n\nimport logging\nimport mimetypes\nimport os\nfrom functools import wraps\n\nfrom django.conf import settings\nfrom django.http import HttpResponse, HttpResponseRedirect, Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import render\nfrom django.utils.encoding import iri_to_uri\nfrom django.views.static import serve\n\nfrom readthedocs.builds.models import Version\nfrom readthedocs.core.permissions import AdminPermission\nfrom readthedocs.core.resolver import resolve, resolve_path\nfrom readthedocs.core.symlink import PrivateSymlink, PublicSymlink\nfrom readthedocs.projects import constants\nfrom readthedocs.projects.models import Project, ProjectRelationship\n\nlog = logging.getLogger(__name__)\n\n\ndef map_subproject_slug(view_func):\n \"\"\"\n A decorator that maps a ``subproject_slug`` URL param into a Project.\n\n :raises: Http404 if the Project doesn't exist\n\n .. 
warning:: Does not take into account any kind of privacy settings.\n \"\"\"\n @wraps(view_func)\n def inner_view(request, subproject=None, subproject_slug=None, *args, **kwargs): # noqa\n if subproject is None and subproject_slug:\n # Try to fetch by subproject alias first, otherwise we might end up\n # redirected to an unrelated project.\n try:\n # Depends on a project passed into kwargs\n rel = ProjectRelationship.objects.get(\n parent=kwargs['project'],\n alias=subproject_slug,\n )\n subproject = rel.child\n except (ProjectRelationship.DoesNotExist, KeyError):\n subproject = get_object_or_404(Project, slug=subproject_slug)\n return view_func(request, subproject=subproject, *args, **kwargs)\n\n return inner_view\n\n\ndef map_project_slug(view_func):\n \"\"\"\n A decorator that maps a ``project_slug`` URL param into a Project.\n\n :raises: Http404 if the Project doesn't exist\n\n .. warning:: Does not take into account any kind of privacy settings.\n \"\"\"\n @wraps(view_func)\n def inner_view(request, project=None, project_slug=None, *args, **kwargs): # noqa\n if project is None:\n if not project_slug:\n project_slug = request.slug\n try:\n project = Project.objects.get(slug=project_slug)\n except Project.DoesNotExist:\n raise Http404('Project does not exist.')\n return view_func(request, project=project, *args, **kwargs)\n\n return inner_view\n\n\n@map_project_slug\n@map_subproject_slug\ndef redirect_project_slug(request, project, subproject): # pylint: disable=unused-argument\n \"\"\"Handle / -> /en/latest/ directs on subdomains.\"\"\"\n return HttpResponseRedirect(resolve(subproject or project))\n\n\n@map_project_slug\n@map_subproject_slug\ndef redirect_page_with_filename(request, project, subproject, filename): # pylint: disable=unused-argument # noqa\n \"\"\"Redirect /page/file.html to /en/latest/file.html.\"\"\"\n return HttpResponseRedirect(\n resolve(subproject or project, filename=filename))\n\n\ndef _serve_401(request, project):\n res = render(request, '401.html')\n res.status_code = 401\n log.debug('Unauthorized access to {0} documentation'.format(project.slug))\n return res\n\n\ndef _serve_file(request, filename, basepath):\n # Serve the file from the proper location\n if settings.DEBUG or getattr(settings, 'PYTHON_MEDIA', False):\n # Serve from Python\n return serve(request, filename, basepath)\n\n # Serve from Nginx\n content_type, encoding = mimetypes.guess_type(\n os.path.join(basepath, filename))\n content_type = content_type or 'application/octet-stream'\n response = HttpResponse(content_type=content_type)\n if encoding:\n response['Content-Encoding'] = encoding\n try:\n iri_path = os.path.join(\n basepath[len(settings.SITE_ROOT):],\n filename,\n )\n # NGINX does not support non-ASCII characters in the header, so we\n # convert the IRI path to URI so it's compatible with what NGINX expects\n # as the header value.\n # https://github.com/benoitc/gunicorn/issues/1448\n # https://docs.djangoproject.com/en/1.11/ref/unicode/#uri-and-iri-handling\n x_accel_redirect = iri_to_uri(iri_path)\n response['X-Accel-Redirect'] = x_accel_redirect\n except UnicodeEncodeError:\n raise Http404\n\n return response\n\n\n@map_project_slug\n@map_subproject_slug\ndef serve_docs(\n request, project, subproject, lang_slug=None, version_slug=None,\n filename=''):\n \"\"\"Exists to map existing proj, lang, version, filename views to the file format.\"\"\"\n if not version_slug:\n version_slug = project.get_default_version()\n try:\n version = 
project.versions.public(request.user).get(slug=version_slug)\n except Version.DoesNotExist:\n # Properly raise a 404 if the version doesn't exist (or is inactive) and\n # a 401 if it does\n if project.versions.filter(slug=version_slug, active=True).exists():\n return _serve_401(request, project)\n raise Http404('Version does not exist.')\n filename = resolve_path(\n subproject or project, # Resolve the subproject if it exists\n version_slug=version_slug,\n language=lang_slug,\n filename=filename,\n subdomain=True, # subdomain will make it a \"full\" path without a URL prefix\n )\n if (version.privacy_level == constants.PRIVATE and\n not AdminPermission.is_member(user=request.user, obj=project)):\n return _serve_401(request, project)\n return _serve_symlink_docs(\n request,\n filename=filename,\n project=project,\n privacy_level=version.privacy_level,\n )\n\n\n@map_project_slug\ndef _serve_symlink_docs(request, project, privacy_level, filename=''):\n \"\"\"Serve a file by symlink, or a 404 if not found.\"\"\"\n # Handle indexes\n if filename == '' or filename[-1] == '/':\n filename += 'index.html'\n\n # This breaks path joining, by ignoring the root when given an \"absolute\" path\n if filename[0] == '/':\n filename = filename[1:]\n\n log.info('Serving %s for %s', filename, project)\n\n files_tried = []\n\n serve_docs = getattr(settings, 'SERVE_DOCS', [constants.PRIVATE])\n\n if (settings.DEBUG or constants.PUBLIC in serve_docs) and privacy_level != constants.PRIVATE: # yapf: disable # noqa\n public_symlink = PublicSymlink(project)\n basepath = public_symlink.project_root\n if os.path.exists(os.path.join(basepath, filename)):\n return _serve_file(request, filename, basepath)\n\n files_tried.append(os.path.join(basepath, filename))\n\n if (settings.DEBUG or constants.PRIVATE in serve_docs) and privacy_level == constants.PRIVATE: # yapf: disable # noqa\n # Handle private\n private_symlink = PrivateSymlink(project)\n basepath = private_symlink.project_root\n\n if os.path.exists(os.path.join(basepath, filename)):\n return _serve_file(request, filename, basepath)\n\n files_tried.append(os.path.join(basepath, filename))\n\n raise Http404(\n 'File not found. Tried these files: %s' % ','.join(files_tried))\n\n\n@map_project_slug\ndef robots_txt(request, project):\n \"\"\"\n Serve custom user's defined ``/robots.txt``.\n\n If the user added a ``robots.txt`` in the \"default version\" of the project,\n we serve it directly.\n \"\"\"\n # Use the ``robots.txt`` file from the default version configured\n version_slug = project.get_default_version()\n version = project.versions.get(slug=version_slug)\n\n no_serve_robots_txt = any([\n # If project is private or,\n project.privacy_level == constants.PRIVATE,\n # default version is private or,\n version.privacy_level == constants.PRIVATE,\n # default version is not active or,\n not version.active,\n # default version is not built\n not version.built,\n ])\n if no_serve_robots_txt:\n # ... 
we do return a 404\n raise Http404()\n\n filename = resolve_path(\n project,\n version_slug=version_slug,\n filename='robots.txt',\n subdomain=True, # subdomain will make it a \"full\" path without a URL prefix\n )\n\n # This breaks path joining, by ignoring the root when given an \"absolute\" path\n if filename[0] == '/':\n filename = filename[1:]\n\n basepath = PublicSymlink(project).project_root\n fullpath = os.path.join(basepath, filename)\n\n if os.path.exists(fullpath):\n return HttpResponse(open(fullpath).read(), content_type='text/plain')\n\n return HttpResponse('User-agent: *\\nAllow: /\\n', content_type='text/plain')\n", "path": "readthedocs/core/views/serve.py"}]} | 3,198 | 713 |
gh_patches_debug_13364 | rasdani/github-patches | git_diff | lutris__lutris-3987 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Installer didn't start: errors in .py
I want to install some games, such as KotOR, through Steam, but I get the same error every time.
File "/usr/lib/python3.10/site-packages/lutris/gui/installerwindow.py", line 155, in on_installer_selected
self.interpreter = interpreter.ScriptInterpreter(
File "/usr/lib/python3.10/site-packages/lutris/installer/interpreter.py", line 50, in __init__
self.installer = LutrisInstaller(installer, self, service=self.service, appid=self.appid)
File "/usr/lib/python3.10/site-packages/lutris/installer/installer.py", line 31, in __init__
self.service = self.get_service(initial=service)
File "/usr/lib/python3.10/site-packages/lutris/installer/installer.py", line 47, in get_service
return SERVICES["steam"]()
KeyError: 'steam'
--- END ISSUE ---
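For context, the traceback above comes down to an unguarded dictionary lookup: when the Steam service plugin is missing from the `SERVICES` registry (for example because an optional dependency failed to import), `SERVICES["steam"]` raises `KeyError`. The sketch below is not the Lutris code; it uses a stand-in `SERVICES` dict to reproduce the failure and show a guarded lookup that degrades gracefully.

```python
# Stand-in registry: the "steam" entry is absent, mimicking a missing plugin.
SERVICES = {"gog": lambda: "gog service instance"}


def get_service_unguarded(runner):
    if "steam" in runner:
        return SERVICES["steam"]()  # raises KeyError: 'steam'


def get_service_guarded(runner):
    # Only instantiate the service if it is actually registered.
    if "steam" in runner and "steam" in SERVICES:
        return SERVICES["steam"]()
    return None


print(get_service_guarded("steam"))  # -> None instead of crashing
try:
    get_service_unguarded("steam")
except KeyError as exc:
    print("unguarded lookup failed:", exc)
```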
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lutris/installer/installer.py`
Content:
```
1 """Lutris installer class"""
2 import json
3 import os
4 from gettext import gettext as _
5
6 from lutris.config import LutrisConfig, write_game_config
7 from lutris.database.games import add_or_update, get_game_by_field
8 from lutris.game import Game
9 from lutris.installer import AUTO_ELF_EXE, AUTO_WIN32_EXE
10 from lutris.installer.errors import ScriptingError
11 from lutris.installer.installer_file import InstallerFile
12 from lutris.installer.legacy import get_game_launcher
13 from lutris.runners import import_runner
14 from lutris.services import SERVICES
15 from lutris.util.game_finder import find_linux_game_executable, find_windows_game_executable
16 from lutris.util.log import logger
17
18
19 class LutrisInstaller: # pylint: disable=too-many-instance-attributes
20 """Represents a Lutris installer"""
21
22 def __init__(self, installer, interpreter, service, appid):
23 self.interpreter = interpreter
24 self.installer = installer
25 self.version = installer["version"]
26 self.slug = installer["slug"]
27 self.year = installer.get("year")
28 self.runner = installer["runner"]
29 self.script = installer.get("script")
30 self.game_name = installer["name"]
31 self.game_slug = installer["game_slug"]
32 self.service = self.get_service(initial=service)
33 self.service_appid = self.get_appid(installer, initial=appid)
34 self.variables = installer.get("variables", {})
35 self.files = [
36 InstallerFile(self.game_slug, file_id, file_meta)
37 for file_desc in self.script.get("files", [])
38 for file_id, file_meta in file_desc.items()
39 ]
40 self.requires = self.script.get("requires")
41 self.extends = self.script.get("extends")
42 self.game_id = self.get_game_id()
43
44 def get_service(self, initial=None):
45 if initial:
46 return initial
47 if "steam" in self.runner:
48 return SERVICES["steam"]()
49 version = self.version.lower()
50 if "humble" in version:
51 return SERVICES["humblebundle"]()
52 if "gog" in version:
53 return SERVICES["gog"]()
54
55 def get_appid(self, installer, initial=None):
56 if initial:
57 return initial
58 if not self.service:
59 return
60 if self.service.id == "steam":
61 return installer.get("steamid")
62 game_config = self.script.get("game", {})
63 if self.service.id == "gog":
64 return game_config.get("gogid") or installer.get("gogid")
65 if self.service.id == "humblebundle":
66 return game_config.get("humbleid") or installer.get("humblestoreid")
67
68 @property
69 def script_pretty(self):
70 """Return a pretty print of the script"""
71 return json.dumps(self.script, indent=4)
72
73 def get_game_id(self):
74 """Return the ID of the game in the local DB if one exists"""
75 # If the game is in the library and uninstalled, the first installation
76 # updates it
77 existing_game = get_game_by_field(self.game_slug, "slug")
78 if existing_game and not existing_game["installed"]:
79 return existing_game["id"]
80
81 @property
82 def creates_game_folder(self):
83 """Determines if an install script should create a game folder for the game"""
84 if self.requires:
85 # Game is an extension of an existing game, folder exists
86 return False
87 if self.runner == "steam":
88 # Steam games installs in their steamapps directory
89 return False
90 if (
91 self.files
92 or self.script.get("game", {}).get("gog")
93 or self.script.get("game", {}).get("prefix")
94 ):
95 return True
96 command_names = [list(c.keys())[0] for c in self.script.get("installer", [])]
97 if "insert-disc" in command_names:
98 return True
99 return False
100
101 def get_errors(self):
102 """Return potential errors in the script"""
103 errors = []
104 if not isinstance(self.script, dict):
105 errors.append("Script must be a dictionary")
106 # Return early since the method assumes a dict
107 return errors
108
109 # Check that installers contains all required fields
110 for field in ("runner", "game_name", "game_slug"):
111 if not hasattr(self, field) or not getattr(self, field):
112 errors.append("Missing field '%s'" % field)
113
114 # Check that libretro installers have a core specified
115 if self.runner == "libretro":
116 if "game" not in self.script or "core" not in self.script["game"]:
117 errors.append("Missing libretro core in game section")
118
119 # Check that Steam games have an AppID
120 if self.runner == "steam":
121 if not self.script.get("game", {}).get("appid"):
122 errors.append("Missing appid for Steam game")
123
124 # Check that installers don't contain both 'requires' and 'extends'
125 if self.script.get("requires") and self.script.get("extends"):
126 errors.append("Scripts can't have both extends and requires")
127 return errors
128
129 def pop_user_provided_file(self):
130 """Return and remove the first user provided file, which is used for game stores"""
131 for index, file in enumerate(self.files):
132 if file.url.startswith("N/A"):
133 self.files.pop(index)
134 return file.id
135
136 def prepare_game_files(self):
137 """Gathers necessary files before iterating through them."""
138 if not self.files:
139 return
140 if self.service:
141 if self.service.online and not self.service.is_connected():
142 logger.info("Not authenticated to %s", self.service.id)
143 return
144 installer_file_id = self.pop_user_provided_file()
145 if not installer_file_id:
146 logger.warning("Could not find a file for this service")
147 return
148 if self.service.has_extras:
149 self.service.selected_extras = self.interpreter.extras
150 installer_files = self.service.get_installer_files(self, installer_file_id)
151 for installer_file in installer_files:
152 self.files.append(installer_file)
153 if not installer_files:
154 # Failed to get the service game, put back a user provided file
155 self.files.insert(0, "N/A: Provider installer file")
156
157 def _substitute_config(self, script_config):
158 """Substitute values such as $GAMEDIR in a config dict."""
159 config = {}
160 for key in script_config:
161 if not isinstance(key, str):
162 raise ScriptingError(_("Game config key must be a string"), key)
163 value = script_config[key]
164 if str(value).lower() == 'true':
165 value = True
166 if str(value).lower() == 'false':
167 value = False
168 if isinstance(value, list):
169 config[key] = [self.interpreter._substitute(i) for i in value]
170 elif isinstance(value, dict):
171 config[key] = {k: self.interpreter._substitute(v) for (k, v) in value.items()}
172 elif isinstance(value, bool):
173 config[key] = value
174 else:
175 config[key] = self.interpreter._substitute(value)
176 return config
177
178 def get_game_config(self):
179 """Return the game configuration"""
180 if self.requires:
181 # Load the base game config
182 required_game = get_game_by_field(self.requires, field="installer_slug")
183 if not required_game:
184 required_game = get_game_by_field(self.requires, field="slug")
185 if not required_game:
186 raise ValueError("No game matched '%s' on installer_slug or slug" % self.requires)
187 base_config = LutrisConfig(
188 runner_slug=self.runner, game_config_id=required_game["configpath"]
189 )
190 config = base_config.game_level
191 else:
192 config = {"game": {}}
193
194 # Config update
195 if "system" in self.script:
196 config["system"] = self._substitute_config(self.script["system"])
197 if self.runner in self.script and self.script[self.runner]:
198 config[self.runner] = self._substitute_config(self.script[self.runner])
199 launcher, launcher_config = self.get_game_launcher_config(self.interpreter.game_files)
200 if launcher:
201 config["game"][launcher] = launcher_config
202
203 if "game" in self.script:
204 try:
205 config["game"].update(self.script["game"])
206 except ValueError as err:
207 raise ScriptingError(_("Invalid 'game' section"), self.script["game"]) from err
208 config["game"] = self._substitute_config(config["game"])
209 if AUTO_ELF_EXE in config["game"].get("exe", ""):
210 config["game"]["exe"] = find_linux_game_executable(self.interpreter.target_path,
211 make_executable=True)
212 elif AUTO_WIN32_EXE in config["game"].get("exe", ""):
213 config["game"]["exe"] = find_windows_game_executable(self.interpreter.target_path)
214 return config
215
216 def save(self):
217 """Write the game configuration in the DB and config file"""
218 if self.extends:
219 logger.info(
220 "This is an extension to %s, not creating a new game entry",
221 self.extends,
222 )
223 return
224 configpath = write_game_config(self.slug, self.get_game_config())
225 runner_inst = import_runner(self.runner)()
226 if self.service:
227 service_id = self.service.id
228 else:
229 service_id = None
230 self.game_id = add_or_update(
231 name=self.game_name,
232 runner=self.runner,
233 slug=self.game_slug,
234 platform=runner_inst.get_platform(),
235 directory=self.interpreter.target_path,
236 installed=1,
237 hidden=0,
238 installer_slug=self.slug,
239 parent_slug=self.requires,
240 year=self.year,
241 configpath=configpath,
242 service=service_id,
243 service_id=self.service_appid,
244 id=self.game_id,
245 )
246 # This is a bit redundant but used to trigger the game-updated signal
247 game = Game(self.game_id)
248 game.save()
249
250 def get_game_launcher_config(self, game_files):
251 """Game options such as exe or main_file can be added at the root of the
252 script as a shortcut, this integrates them into the game config properly
253 This should be deprecated. Game launchers should go in the game section.
254 """
255 launcher, launcher_value = get_game_launcher(self.script)
256 if isinstance(launcher_value, list):
257 launcher_values = []
258 for game_file in launcher_value:
259 if game_file in game_files:
260 launcher_values.append(game_files[game_file])
261 else:
262 launcher_values.append(game_file)
263 return launcher, launcher_values
264 if launcher_value:
265 if launcher_value in game_files:
266 launcher_value = game_files[launcher_value]
267 elif self.interpreter.target_path and os.path.exists(
268 os.path.join(self.interpreter.target_path, launcher_value)
269 ):
270 launcher_value = os.path.join(self.interpreter.target_path, launcher_value)
271 return launcher, launcher_value
272
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lutris/installer/installer.py b/lutris/installer/installer.py
--- a/lutris/installer/installer.py
+++ b/lutris/installer/installer.py
@@ -44,12 +44,12 @@
def get_service(self, initial=None):
if initial:
return initial
- if "steam" in self.runner:
+ if "steam" in self.runner and "steam" in SERVICES:
return SERVICES["steam"]()
version = self.version.lower()
- if "humble" in version:
+ if "humble" in version and "humblebundle" in SERVICES:
return SERVICES["humblebundle"]()
- if "gog" in version:
+ if "gog" in version and "gog" in SERVICES:
return SERVICES["gog"]()
def get_appid(self, installer, initial=None):
| {"golden_diff": "diff --git a/lutris/installer/installer.py b/lutris/installer/installer.py\n--- a/lutris/installer/installer.py\n+++ b/lutris/installer/installer.py\n@@ -44,12 +44,12 @@\n def get_service(self, initial=None):\n if initial:\n return initial\n- if \"steam\" in self.runner:\n+ if \"steam\" in self.runner and \"steam\" in SERVICES:\n return SERVICES[\"steam\"]()\n version = self.version.lower()\n- if \"humble\" in version:\n+ if \"humble\" in version and \"humblebundle\" in SERVICES:\n return SERVICES[\"humblebundle\"]()\n- if \"gog\" in version:\n+ if \"gog\" in version and \"gog\" in SERVICES:\n return SERVICES[\"gog\"]()\n \n def get_appid(self, installer, initial=None):\n", "issue": "Installer didnt Start Errors in .py\ni want to install some stuff like kotor with steam but get everytime the same error.\r\n\r\nFile \"/usr/lib/python3.10/site-packages/lutris/gui/installerwindow.py\", line 155, in on_installer_selected\r\n self.interpreter = interpreter.ScriptInterpreter(\r\n File \"/usr/lib/python3.10/site-packages/lutris/installer/interpreter.py\", line 50, in __init__\r\n self.installer = LutrisInstaller(installer, self, service=self.service, appid=self.appid)\r\n File \"/usr/lib/python3.10/site-packages/lutris/installer/installer.py\", line 31, in __init__\r\n self.service = self.get_service(initial=service)\r\n File \"/usr/lib/python3.10/site-packages/lutris/installer/installer.py\", line 47, in get_service\r\n return SERVICES[\"steam\"]()\r\nKeyError: 'steam'\r\n\n", "before_files": [{"content": "\"\"\"Lutris installer class\"\"\"\nimport json\nimport os\nfrom gettext import gettext as _\n\nfrom lutris.config import LutrisConfig, write_game_config\nfrom lutris.database.games import add_or_update, get_game_by_field\nfrom lutris.game import Game\nfrom lutris.installer import AUTO_ELF_EXE, AUTO_WIN32_EXE\nfrom lutris.installer.errors import ScriptingError\nfrom lutris.installer.installer_file import InstallerFile\nfrom lutris.installer.legacy import get_game_launcher\nfrom lutris.runners import import_runner\nfrom lutris.services import SERVICES\nfrom lutris.util.game_finder import find_linux_game_executable, find_windows_game_executable\nfrom lutris.util.log import logger\n\n\nclass LutrisInstaller: # pylint: disable=too-many-instance-attributes\n \"\"\"Represents a Lutris installer\"\"\"\n\n def __init__(self, installer, interpreter, service, appid):\n self.interpreter = interpreter\n self.installer = installer\n self.version = installer[\"version\"]\n self.slug = installer[\"slug\"]\n self.year = installer.get(\"year\")\n self.runner = installer[\"runner\"]\n self.script = installer.get(\"script\")\n self.game_name = installer[\"name\"]\n self.game_slug = installer[\"game_slug\"]\n self.service = self.get_service(initial=service)\n self.service_appid = self.get_appid(installer, initial=appid)\n self.variables = installer.get(\"variables\", {})\n self.files = [\n InstallerFile(self.game_slug, file_id, file_meta)\n for file_desc in self.script.get(\"files\", [])\n for file_id, file_meta in file_desc.items()\n ]\n self.requires = self.script.get(\"requires\")\n self.extends = self.script.get(\"extends\")\n self.game_id = self.get_game_id()\n\n def get_service(self, initial=None):\n if initial:\n return initial\n if \"steam\" in self.runner:\n return SERVICES[\"steam\"]()\n version = self.version.lower()\n if \"humble\" in version:\n return SERVICES[\"humblebundle\"]()\n if \"gog\" in version:\n return SERVICES[\"gog\"]()\n\n def get_appid(self, installer, 
initial=None):\n if initial:\n return initial\n if not self.service:\n return\n if self.service.id == \"steam\":\n return installer.get(\"steamid\")\n game_config = self.script.get(\"game\", {})\n if self.service.id == \"gog\":\n return game_config.get(\"gogid\") or installer.get(\"gogid\")\n if self.service.id == \"humblebundle\":\n return game_config.get(\"humbleid\") or installer.get(\"humblestoreid\")\n\n @property\n def script_pretty(self):\n \"\"\"Return a pretty print of the script\"\"\"\n return json.dumps(self.script, indent=4)\n\n def get_game_id(self):\n \"\"\"Return the ID of the game in the local DB if one exists\"\"\"\n # If the game is in the library and uninstalled, the first installation\n # updates it\n existing_game = get_game_by_field(self.game_slug, \"slug\")\n if existing_game and not existing_game[\"installed\"]:\n return existing_game[\"id\"]\n\n @property\n def creates_game_folder(self):\n \"\"\"Determines if an install script should create a game folder for the game\"\"\"\n if self.requires:\n # Game is an extension of an existing game, folder exists\n return False\n if self.runner == \"steam\":\n # Steam games installs in their steamapps directory\n return False\n if (\n self.files\n or self.script.get(\"game\", {}).get(\"gog\")\n or self.script.get(\"game\", {}).get(\"prefix\")\n ):\n return True\n command_names = [list(c.keys())[0] for c in self.script.get(\"installer\", [])]\n if \"insert-disc\" in command_names:\n return True\n return False\n\n def get_errors(self):\n \"\"\"Return potential errors in the script\"\"\"\n errors = []\n if not isinstance(self.script, dict):\n errors.append(\"Script must be a dictionary\")\n # Return early since the method assumes a dict\n return errors\n\n # Check that installers contains all required fields\n for field in (\"runner\", \"game_name\", \"game_slug\"):\n if not hasattr(self, field) or not getattr(self, field):\n errors.append(\"Missing field '%s'\" % field)\n\n # Check that libretro installers have a core specified\n if self.runner == \"libretro\":\n if \"game\" not in self.script or \"core\" not in self.script[\"game\"]:\n errors.append(\"Missing libretro core in game section\")\n\n # Check that Steam games have an AppID\n if self.runner == \"steam\":\n if not self.script.get(\"game\", {}).get(\"appid\"):\n errors.append(\"Missing appid for Steam game\")\n\n # Check that installers don't contain both 'requires' and 'extends'\n if self.script.get(\"requires\") and self.script.get(\"extends\"):\n errors.append(\"Scripts can't have both extends and requires\")\n return errors\n\n def pop_user_provided_file(self):\n \"\"\"Return and remove the first user provided file, which is used for game stores\"\"\"\n for index, file in enumerate(self.files):\n if file.url.startswith(\"N/A\"):\n self.files.pop(index)\n return file.id\n\n def prepare_game_files(self):\n \"\"\"Gathers necessary files before iterating through them.\"\"\"\n if not self.files:\n return\n if self.service:\n if self.service.online and not self.service.is_connected():\n logger.info(\"Not authenticated to %s\", self.service.id)\n return\n installer_file_id = self.pop_user_provided_file()\n if not installer_file_id:\n logger.warning(\"Could not find a file for this service\")\n return\n if self.service.has_extras:\n self.service.selected_extras = self.interpreter.extras\n installer_files = self.service.get_installer_files(self, installer_file_id)\n for installer_file in installer_files:\n self.files.append(installer_file)\n if not installer_files:\n # 
Failed to get the service game, put back a user provided file\n self.files.insert(0, \"N/A: Provider installer file\")\n\n def _substitute_config(self, script_config):\n \"\"\"Substitute values such as $GAMEDIR in a config dict.\"\"\"\n config = {}\n for key in script_config:\n if not isinstance(key, str):\n raise ScriptingError(_(\"Game config key must be a string\"), key)\n value = script_config[key]\n if str(value).lower() == 'true':\n value = True\n if str(value).lower() == 'false':\n value = False\n if isinstance(value, list):\n config[key] = [self.interpreter._substitute(i) for i in value]\n elif isinstance(value, dict):\n config[key] = {k: self.interpreter._substitute(v) for (k, v) in value.items()}\n elif isinstance(value, bool):\n config[key] = value\n else:\n config[key] = self.interpreter._substitute(value)\n return config\n\n def get_game_config(self):\n \"\"\"Return the game configuration\"\"\"\n if self.requires:\n # Load the base game config\n required_game = get_game_by_field(self.requires, field=\"installer_slug\")\n if not required_game:\n required_game = get_game_by_field(self.requires, field=\"slug\")\n if not required_game:\n raise ValueError(\"No game matched '%s' on installer_slug or slug\" % self.requires)\n base_config = LutrisConfig(\n runner_slug=self.runner, game_config_id=required_game[\"configpath\"]\n )\n config = base_config.game_level\n else:\n config = {\"game\": {}}\n\n # Config update\n if \"system\" in self.script:\n config[\"system\"] = self._substitute_config(self.script[\"system\"])\n if self.runner in self.script and self.script[self.runner]:\n config[self.runner] = self._substitute_config(self.script[self.runner])\n launcher, launcher_config = self.get_game_launcher_config(self.interpreter.game_files)\n if launcher:\n config[\"game\"][launcher] = launcher_config\n\n if \"game\" in self.script:\n try:\n config[\"game\"].update(self.script[\"game\"])\n except ValueError as err:\n raise ScriptingError(_(\"Invalid 'game' section\"), self.script[\"game\"]) from err\n config[\"game\"] = self._substitute_config(config[\"game\"])\n if AUTO_ELF_EXE in config[\"game\"].get(\"exe\", \"\"):\n config[\"game\"][\"exe\"] = find_linux_game_executable(self.interpreter.target_path,\n make_executable=True)\n elif AUTO_WIN32_EXE in config[\"game\"].get(\"exe\", \"\"):\n config[\"game\"][\"exe\"] = find_windows_game_executable(self.interpreter.target_path)\n return config\n\n def save(self):\n \"\"\"Write the game configuration in the DB and config file\"\"\"\n if self.extends:\n logger.info(\n \"This is an extension to %s, not creating a new game entry\",\n self.extends,\n )\n return\n configpath = write_game_config(self.slug, self.get_game_config())\n runner_inst = import_runner(self.runner)()\n if self.service:\n service_id = self.service.id\n else:\n service_id = None\n self.game_id = add_or_update(\n name=self.game_name,\n runner=self.runner,\n slug=self.game_slug,\n platform=runner_inst.get_platform(),\n directory=self.interpreter.target_path,\n installed=1,\n hidden=0,\n installer_slug=self.slug,\n parent_slug=self.requires,\n year=self.year,\n configpath=configpath,\n service=service_id,\n service_id=self.service_appid,\n id=self.game_id,\n )\n # This is a bit redundant but used to trigger the game-updated signal\n game = Game(self.game_id)\n game.save()\n\n def get_game_launcher_config(self, game_files):\n \"\"\"Game options such as exe or main_file can be added at the root of the\n script as a shortcut, this integrates them into the game config properly\n 
This should be deprecated. Game launchers should go in the game section.\n \"\"\"\n launcher, launcher_value = get_game_launcher(self.script)\n if isinstance(launcher_value, list):\n launcher_values = []\n for game_file in launcher_value:\n if game_file in game_files:\n launcher_values.append(game_files[game_file])\n else:\n launcher_values.append(game_file)\n return launcher, launcher_values\n if launcher_value:\n if launcher_value in game_files:\n launcher_value = game_files[launcher_value]\n elif self.interpreter.target_path and os.path.exists(\n os.path.join(self.interpreter.target_path, launcher_value)\n ):\n launcher_value = os.path.join(self.interpreter.target_path, launcher_value)\n return launcher, launcher_value\n", "path": "lutris/installer/installer.py"}], "after_files": [{"content": "\"\"\"Lutris installer class\"\"\"\nimport json\nimport os\nfrom gettext import gettext as _\n\nfrom lutris.config import LutrisConfig, write_game_config\nfrom lutris.database.games import add_or_update, get_game_by_field\nfrom lutris.game import Game\nfrom lutris.installer import AUTO_ELF_EXE, AUTO_WIN32_EXE\nfrom lutris.installer.errors import ScriptingError\nfrom lutris.installer.installer_file import InstallerFile\nfrom lutris.installer.legacy import get_game_launcher\nfrom lutris.runners import import_runner\nfrom lutris.services import SERVICES\nfrom lutris.util.game_finder import find_linux_game_executable, find_windows_game_executable\nfrom lutris.util.log import logger\n\n\nclass LutrisInstaller: # pylint: disable=too-many-instance-attributes\n \"\"\"Represents a Lutris installer\"\"\"\n\n def __init__(self, installer, interpreter, service, appid):\n self.interpreter = interpreter\n self.installer = installer\n self.version = installer[\"version\"]\n self.slug = installer[\"slug\"]\n self.year = installer.get(\"year\")\n self.runner = installer[\"runner\"]\n self.script = installer.get(\"script\")\n self.game_name = installer[\"name\"]\n self.game_slug = installer[\"game_slug\"]\n self.service = self.get_service(initial=service)\n self.service_appid = self.get_appid(installer, initial=appid)\n self.variables = installer.get(\"variables\", {})\n self.files = [\n InstallerFile(self.game_slug, file_id, file_meta)\n for file_desc in self.script.get(\"files\", [])\n for file_id, file_meta in file_desc.items()\n ]\n self.requires = self.script.get(\"requires\")\n self.extends = self.script.get(\"extends\")\n self.game_id = self.get_game_id()\n\n def get_service(self, initial=None):\n if initial:\n return initial\n if \"steam\" in self.runner and \"steam\" in SERVICES:\n return SERVICES[\"steam\"]()\n version = self.version.lower()\n if \"humble\" in version and \"humblebundle\" in SERVICES:\n return SERVICES[\"humblebundle\"]()\n if \"gog\" in version and \"gog\" in SERVICES:\n return SERVICES[\"gog\"]()\n\n def get_appid(self, installer, initial=None):\n if initial:\n return initial\n if not self.service:\n return\n if self.service.id == \"steam\":\n return installer.get(\"steamid\")\n game_config = self.script.get(\"game\", {})\n if self.service.id == \"gog\":\n return game_config.get(\"gogid\") or installer.get(\"gogid\")\n if self.service.id == \"humblebundle\":\n return game_config.get(\"humbleid\") or installer.get(\"humblestoreid\")\n\n @property\n def script_pretty(self):\n \"\"\"Return a pretty print of the script\"\"\"\n return json.dumps(self.script, indent=4)\n\n def get_game_id(self):\n \"\"\"Return the ID of the game in the local DB if one exists\"\"\"\n # If the game is in the 
library and uninstalled, the first installation\n # updates it\n existing_game = get_game_by_field(self.game_slug, \"slug\")\n if existing_game and not existing_game[\"installed\"]:\n return existing_game[\"id\"]\n\n @property\n def creates_game_folder(self):\n \"\"\"Determines if an install script should create a game folder for the game\"\"\"\n if self.requires:\n # Game is an extension of an existing game, folder exists\n return False\n if self.runner == \"steam\":\n # Steam games installs in their steamapps directory\n return False\n if (\n self.files\n or self.script.get(\"game\", {}).get(\"gog\")\n or self.script.get(\"game\", {}).get(\"prefix\")\n ):\n return True\n command_names = [list(c.keys())[0] for c in self.script.get(\"installer\", [])]\n if \"insert-disc\" in command_names:\n return True\n return False\n\n def get_errors(self):\n \"\"\"Return potential errors in the script\"\"\"\n errors = []\n if not isinstance(self.script, dict):\n errors.append(\"Script must be a dictionary\")\n # Return early since the method assumes a dict\n return errors\n\n # Check that installers contains all required fields\n for field in (\"runner\", \"game_name\", \"game_slug\"):\n if not hasattr(self, field) or not getattr(self, field):\n errors.append(\"Missing field '%s'\" % field)\n\n # Check that libretro installers have a core specified\n if self.runner == \"libretro\":\n if \"game\" not in self.script or \"core\" not in self.script[\"game\"]:\n errors.append(\"Missing libretro core in game section\")\n\n # Check that Steam games have an AppID\n if self.runner == \"steam\":\n if not self.script.get(\"game\", {}).get(\"appid\"):\n errors.append(\"Missing appid for Steam game\")\n\n # Check that installers don't contain both 'requires' and 'extends'\n if self.script.get(\"requires\") and self.script.get(\"extends\"):\n errors.append(\"Scripts can't have both extends and requires\")\n return errors\n\n def pop_user_provided_file(self):\n \"\"\"Return and remove the first user provided file, which is used for game stores\"\"\"\n for index, file in enumerate(self.files):\n if file.url.startswith(\"N/A\"):\n self.files.pop(index)\n return file.id\n\n def prepare_game_files(self):\n \"\"\"Gathers necessary files before iterating through them.\"\"\"\n if not self.files:\n return\n if self.service:\n if self.service.online and not self.service.is_connected():\n logger.info(\"Not authenticated to %s\", self.service.id)\n return\n installer_file_id = self.pop_user_provided_file()\n if not installer_file_id:\n logger.warning(\"Could not find a file for this service\")\n return\n if self.service.has_extras:\n self.service.selected_extras = self.interpreter.extras\n installer_files = self.service.get_installer_files(self, installer_file_id)\n for installer_file in installer_files:\n self.files.append(installer_file)\n if not installer_files:\n # Failed to get the service game, put back a user provided file\n self.files.insert(0, \"N/A: Provider installer file\")\n\n def _substitute_config(self, script_config):\n \"\"\"Substitute values such as $GAMEDIR in a config dict.\"\"\"\n config = {}\n for key in script_config:\n if not isinstance(key, str):\n raise ScriptingError(_(\"Game config key must be a string\"), key)\n value = script_config[key]\n if str(value).lower() == 'true':\n value = True\n if str(value).lower() == 'false':\n value = False\n if isinstance(value, list):\n config[key] = [self.interpreter._substitute(i) for i in value]\n elif isinstance(value, dict):\n config[key] = {k: 
self.interpreter._substitute(v) for (k, v) in value.items()}\n elif isinstance(value, bool):\n config[key] = value\n else:\n config[key] = self.interpreter._substitute(value)\n return config\n\n def get_game_config(self):\n \"\"\"Return the game configuration\"\"\"\n if self.requires:\n # Load the base game config\n required_game = get_game_by_field(self.requires, field=\"installer_slug\")\n if not required_game:\n required_game = get_game_by_field(self.requires, field=\"slug\")\n if not required_game:\n raise ValueError(\"No game matched '%s' on installer_slug or slug\" % self.requires)\n base_config = LutrisConfig(\n runner_slug=self.runner, game_config_id=required_game[\"configpath\"]\n )\n config = base_config.game_level\n else:\n config = {\"game\": {}}\n\n # Config update\n if \"system\" in self.script:\n config[\"system\"] = self._substitute_config(self.script[\"system\"])\n if self.runner in self.script and self.script[self.runner]:\n config[self.runner] = self._substitute_config(self.script[self.runner])\n launcher, launcher_config = self.get_game_launcher_config(self.interpreter.game_files)\n if launcher:\n config[\"game\"][launcher] = launcher_config\n\n if \"game\" in self.script:\n try:\n config[\"game\"].update(self.script[\"game\"])\n except ValueError as err:\n raise ScriptingError(_(\"Invalid 'game' section\"), self.script[\"game\"]) from err\n config[\"game\"] = self._substitute_config(config[\"game\"])\n if AUTO_ELF_EXE in config[\"game\"].get(\"exe\", \"\"):\n config[\"game\"][\"exe\"] = find_linux_game_executable(self.interpreter.target_path,\n make_executable=True)\n elif AUTO_WIN32_EXE in config[\"game\"].get(\"exe\", \"\"):\n config[\"game\"][\"exe\"] = find_windows_game_executable(self.interpreter.target_path)\n return config\n\n def save(self):\n \"\"\"Write the game configuration in the DB and config file\"\"\"\n if self.extends:\n logger.info(\n \"This is an extension to %s, not creating a new game entry\",\n self.extends,\n )\n return\n configpath = write_game_config(self.slug, self.get_game_config())\n runner_inst = import_runner(self.runner)()\n if self.service:\n service_id = self.service.id\n else:\n service_id = None\n self.game_id = add_or_update(\n name=self.game_name,\n runner=self.runner,\n slug=self.game_slug,\n platform=runner_inst.get_platform(),\n directory=self.interpreter.target_path,\n installed=1,\n hidden=0,\n installer_slug=self.slug,\n parent_slug=self.requires,\n year=self.year,\n configpath=configpath,\n service=service_id,\n service_id=self.service_appid,\n id=self.game_id,\n )\n # This is a bit redundant but used to trigger the game-updated signal\n game = Game(self.game_id)\n game.save()\n\n def get_game_launcher_config(self, game_files):\n \"\"\"Game options such as exe or main_file can be added at the root of the\n script as a shortcut, this integrates them into the game config properly\n This should be deprecated. 
Game launchers should go in the game section.\n \"\"\"\n launcher, launcher_value = get_game_launcher(self.script)\n if isinstance(launcher_value, list):\n launcher_values = []\n for game_file in launcher_value:\n if game_file in game_files:\n launcher_values.append(game_files[game_file])\n else:\n launcher_values.append(game_file)\n return launcher, launcher_values\n if launcher_value:\n if launcher_value in game_files:\n launcher_value = game_files[launcher_value]\n elif self.interpreter.target_path and os.path.exists(\n os.path.join(self.interpreter.target_path, launcher_value)\n ):\n launcher_value = os.path.join(self.interpreter.target_path, launcher_value)\n return launcher, launcher_value\n", "path": "lutris/installer/installer.py"}]} | 3,538 | 200 |
gh_patches_debug_14262 | rasdani/github-patches | git_diff | jupyterhub__jupyterhub-215 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The 'user' trait of a SingleUserNotebookApp instance must be a unicode string, but a value of 1384482<class 'int'> was specified.
I'm using the `SudoSpawner` in combination with Active Directory + PAM based authentication. This results in users being created on the Linux server with numeric IDs that map to AD users. This works fine for users whose AD logins are name-based, but users with numeric IDs cannot log in.
Looking at the logs, I've found that the error occurs when trying to launch the instance of `SingleUserNotebookApp`, because the user ID is being interpreted as an `int` rather than a `str`.
```
The 'user' trait of a SingleUserNotebookApp instance must be a unicode string,
but a value of 1384482 <class 'int'> was specified.
mfitzp@bms-jupyter:~$ /usr/bin/python3 -m jupyterhub.singleuser --user="1384482"
--port=36359 --cookie-name=jupyter-hub-token-1384482 --base-url=/user/1384482
--hub-prefix=/hub/ --hub-api-url=http://localhost:8081/hub/api --ip=localhost
```
Looking at the source, the `user` property is defined as a `Unicode` traitlet, so I'm unsure what is going on here. Is this perhaps an IPython issue? In `argparse` it's possible to define the type that command-line arguments are mapped to, but I could not find the equivalent in the traitlet system.
--- END ISSUE ---
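The behaviour described above is the difference between casting and non-casting traitlets: a `Unicode` trait rejects an `int` value outright, while `CUnicode` coerces it to a string. Below is a minimal sketch of the two, using the standalone `traitlets` package (which provides the same trait classes as the `IPython.utils.traitlets` module referenced below); the value `1384482` is simply the numeric login from the report.

```python
from traitlets import CUnicode, HasTraits, TraitError, Unicode


class StrictApp(HasTraits):
    user = Unicode()    # rejects anything that is not already a str


class CastingApp(HasTraits):
    user = CUnicode()   # coerces the assigned value to a unicode string


print(CastingApp(user=1384482).user)   # -> '1384482'

try:
    StrictApp(user=1384482)
except TraitError as exc:
    print("Unicode trait rejected the int:", exc)
```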
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `jupyterhub/singleuser.py`
Content:
```
1 #!/usr/bin/env python
2 """Extend regular notebook server to be aware of multiuser things."""
3
4 # Copyright (c) Jupyter Development Team.
5 # Distributed under the terms of the Modified BSD License.
6
7 import os
8 from urllib.parse import quote
9
10 import requests
11
12 from tornado import ioloop
13 from tornado.web import HTTPError
14
15 from IPython.utils.traitlets import (
16 Integer,
17 Unicode,
18 )
19
20 from IPython.html.notebookapp import NotebookApp
21 from IPython.html.auth.login import LoginHandler
22 from IPython.html.auth.logout import LogoutHandler
23
24 from IPython.html.utils import url_path_join
25
26
27 from distutils.version import LooseVersion as V
28
29 import IPython
30 if V(IPython.__version__) < V('3.0'):
31 raise ImportError("JupyterHub Requires IPython >= 3.0, found %s" % IPython.__version__)
32
33 # Define two methods to attach to AuthenticatedHandler,
34 # which authenticate via the central auth server.
35
36 class JupyterHubLoginHandler(LoginHandler):
37 @staticmethod
38 def login_available(settings):
39 return True
40
41 @staticmethod
42 def verify_token(self, cookie_name, encrypted_cookie):
43 """method for token verification"""
44 cookie_cache = self.settings['cookie_cache']
45 if encrypted_cookie in cookie_cache:
46 # we've seen this token before, don't ask upstream again
47 return cookie_cache[encrypted_cookie]
48
49 hub_api_url = self.settings['hub_api_url']
50 hub_api_key = self.settings['hub_api_key']
51 r = requests.get(url_path_join(
52 hub_api_url, "authorizations/cookie", cookie_name, quote(encrypted_cookie, safe=''),
53 ),
54 headers = {'Authorization' : 'token %s' % hub_api_key},
55 )
56 if r.status_code == 404:
57 data = None
58 elif r.status_code == 403:
59 self.log.error("I don't have permission to verify cookies, my auth token may have expired: [%i] %s", r.status_code, r.reason)
60 raise HTTPError(500, "Permission failure checking authorization, I may need to be restarted")
61 elif r.status_code >= 500:
62 self.log.error("Upstream failure verifying auth token: [%i] %s", r.status_code, r.reason)
63 raise HTTPError(502, "Failed to check authorization (upstream problem)")
64 elif r.status_code >= 400:
65 self.log.warn("Failed to check authorization: [%i] %s", r.status_code, r.reason)
66 raise HTTPError(500, "Failed to check authorization")
67 else:
68 data = r.json()
69 cookie_cache[encrypted_cookie] = data
70 return data
71
72 @staticmethod
73 def get_user(self):
74 """alternative get_current_user to query the central server"""
75 # only allow this to be called once per handler
76 # avoids issues if an error is raised,
77 # since this may be called again when trying to render the error page
78 if hasattr(self, '_cached_user'):
79 return self._cached_user
80
81 self._cached_user = None
82 my_user = self.settings['user']
83 encrypted_cookie = self.get_cookie(self.cookie_name)
84 if encrypted_cookie:
85 auth_data = JupyterHubLoginHandler.verify_token(self, self.cookie_name, encrypted_cookie)
86 if not auth_data:
87 # treat invalid token the same as no token
88 return None
89 user = auth_data['user']
90 if user == my_user:
91 self._cached_user = user
92 return user
93 else:
94 return None
95 else:
96 self.log.debug("No token cookie")
97 return None
98
99
100 class JupyterHubLogoutHandler(LogoutHandler):
101 def get(self):
102 self.redirect(url_path_join(self.settings['hub_prefix'], 'logout'))
103
104
105 # register new hub related command-line aliases
106 aliases = NotebookApp.aliases.get_default_value()
107 aliases.update({
108 'user' : 'SingleUserNotebookApp.user',
109 'cookie-name': 'SingleUserNotebookApp.cookie_name',
110 'hub-prefix': 'SingleUserNotebookApp.hub_prefix',
111 'hub-api-url': 'SingleUserNotebookApp.hub_api_url',
112 'base-url': 'SingleUserNotebookApp.base_url',
113 })
114
115 class SingleUserNotebookApp(NotebookApp):
116 """A Subclass of the regular NotebookApp that is aware of the parent multiuser context."""
117 user = Unicode(config=True)
118 def _user_changed(self, name, old, new):
119 self.log.name = new
120 cookie_name = Unicode(config=True)
121 hub_prefix = Unicode(config=True)
122 hub_api_url = Unicode(config=True)
123 aliases = aliases
124 open_browser = False
125 login_handler_class = JupyterHubLoginHandler
126 logout_handler_class = JupyterHubLogoutHandler
127
128 cookie_cache_lifetime = Integer(
129 config=True,
130 default_value=300,
131 allow_none=True,
132 help="""
133 Time, in seconds, that we cache a validated cookie before requiring
134 revalidation with the hub.
135 """,
136 )
137
138 def _log_datefmt_default(self):
139 """Exclude date from default date format"""
140 return "%Y-%m-%d %H:%M:%S"
141
142 def _log_format_default(self):
143 """override default log format to include time"""
144 return "%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s %(module)s:%(lineno)d]%(end_color)s %(message)s"
145
146 def _confirm_exit(self):
147 # disable the exit confirmation for background notebook processes
148 ioloop.IOLoop.instance().stop()
149
150 def _clear_cookie_cache(self):
151 self.log.info("Clearing cookie cache")
152 self.tornado_settings['cookie_cache'].clear()
153
154 def initialize(self, argv=None):
155 super().initialize(argv=argv)
156
157 # Start a PeriodicCallback to clear cached cookies. This forces us to
158 # revalidate our user with the Hub at least every
159 # `cookie_cache_lifetime` seconds.
160 if self.cookie_cache_lifetime:
161 ioloop.PeriodicCallback(
162 self._clear_cookie_cache,
163 self.cookie_cache_lifetime * 1e3,
164 ).start()
165
166 def init_webapp(self):
167 # load the hub related settings into the tornado settings dict
168 env = os.environ
169 s = self.tornado_settings
170 s['cookie_cache'] = {}
171 s['user'] = self.user
172 s['hub_api_key'] = env.pop('JPY_API_TOKEN')
173 s['hub_prefix'] = self.hub_prefix
174 s['cookie_name'] = self.cookie_name
175 s['login_url'] = url_path_join(self.hub_prefix, 'login')
176 s['hub_api_url'] = self.hub_api_url
177 super(SingleUserNotebookApp, self).init_webapp()
178
179
180 def main():
181 return SingleUserNotebookApp.launch_instance()
182
183
184 if __name__ == "__main__":
185 main()
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/jupyterhub/singleuser.py b/jupyterhub/singleuser.py
--- a/jupyterhub/singleuser.py
+++ b/jupyterhub/singleuser.py
@@ -15,6 +15,7 @@
from IPython.utils.traitlets import (
Integer,
Unicode,
+ CUnicode,
)
from IPython.html.notebookapp import NotebookApp
@@ -114,7 +115,7 @@
class SingleUserNotebookApp(NotebookApp):
"""A Subclass of the regular NotebookApp that is aware of the parent multiuser context."""
- user = Unicode(config=True)
+ user = CUnicode(config=True)
def _user_changed(self, name, old, new):
self.log.name = new
cookie_name = Unicode(config=True)
| {"golden_diff": "diff --git a/jupyterhub/singleuser.py b/jupyterhub/singleuser.py\n--- a/jupyterhub/singleuser.py\n+++ b/jupyterhub/singleuser.py\n@@ -15,6 +15,7 @@\n from IPython.utils.traitlets import (\n Integer,\n Unicode,\n+ CUnicode,\n )\n \n from IPython.html.notebookapp import NotebookApp\n@@ -114,7 +115,7 @@\n \n class SingleUserNotebookApp(NotebookApp):\n \"\"\"A Subclass of the regular NotebookApp that is aware of the parent multiuser context.\"\"\"\n- user = Unicode(config=True)\n+ user = CUnicode(config=True)\n def _user_changed(self, name, old, new):\n self.log.name = new\n cookie_name = Unicode(config=True)\n", "issue": "The 'user' trait of a SingleUserNotebookApp instance must be a unicode string, but a value of 1384482<class 'int'> was specified.\nI'm using the `SudoSpawner` in combination with Active Directory+PAM based authentication. This results in users being created on the Linux server with numeric IDs that map to AD users. This works fine with users whos AD logins are name-based, but users with numeric IDs cannot log in.\n\nLooking at the logs I've found that the error occurs when trying to launch the instance of `SingleUserNotebookApp` due to the user ID being interpreted as an `int` rather than `str`. \n\n```\nThe 'user' trait of a SingleUserNotebookApp instance must be a unicode string, \n but a value of 1384482 <class 'int'> was specified.\nmfitzp@bms-jupyter:~$ /usr/bin/python3 -m jupyterhub.singleuser --user=\"1384482\" \n --port=36359 --cookie-name=jupyter-hub-token-1384482 --base-url=/user/1384482\n --hub-prefix=/hub/ --hub-api-url=http://localhost:8081/hub/api --ip=localhost \n```\n\nLooking at the source the `user` property is defined as a `Unicode` traitlet so I'm unsure what is going on here? Is this perhaps a IPython issue? 
In `argparse` it's possible to define the type for command-line arguments to be mapped to, but I could not find the equivalent in the traitlet system.\n\n", "before_files": [{"content": "#!/usr/bin/env python\n\"\"\"Extend regular notebook server to be aware of multiuser things.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\nfrom urllib.parse import quote\n\nimport requests\n\nfrom tornado import ioloop\nfrom tornado.web import HTTPError\n\nfrom IPython.utils.traitlets import (\n Integer,\n Unicode,\n)\n\nfrom IPython.html.notebookapp import NotebookApp\nfrom IPython.html.auth.login import LoginHandler\nfrom IPython.html.auth.logout import LogoutHandler\n\nfrom IPython.html.utils import url_path_join\n\n\nfrom distutils.version import LooseVersion as V\n\nimport IPython\nif V(IPython.__version__) < V('3.0'):\n raise ImportError(\"JupyterHub Requires IPython >= 3.0, found %s\" % IPython.__version__)\n\n# Define two methods to attach to AuthenticatedHandler,\n# which authenticate via the central auth server.\n\nclass JupyterHubLoginHandler(LoginHandler):\n @staticmethod\n def login_available(settings):\n return True\n \n @staticmethod\n def verify_token(self, cookie_name, encrypted_cookie):\n \"\"\"method for token verification\"\"\"\n cookie_cache = self.settings['cookie_cache']\n if encrypted_cookie in cookie_cache:\n # we've seen this token before, don't ask upstream again\n return cookie_cache[encrypted_cookie]\n \n hub_api_url = self.settings['hub_api_url']\n hub_api_key = self.settings['hub_api_key']\n r = requests.get(url_path_join(\n hub_api_url, \"authorizations/cookie\", cookie_name, quote(encrypted_cookie, safe=''),\n ),\n headers = {'Authorization' : 'token %s' % hub_api_key},\n )\n if r.status_code == 404:\n data = None\n elif r.status_code == 403:\n self.log.error(\"I don't have permission to verify cookies, my auth token may have expired: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Permission failure checking authorization, I may need to be restarted\")\n elif r.status_code >= 500:\n self.log.error(\"Upstream failure verifying auth token: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(502, \"Failed to check authorization (upstream problem)\")\n elif r.status_code >= 400:\n self.log.warn(\"Failed to check authorization: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Failed to check authorization\")\n else:\n data = r.json()\n cookie_cache[encrypted_cookie] = data\n return data\n \n @staticmethod\n def get_user(self):\n \"\"\"alternative get_current_user to query the central server\"\"\"\n # only allow this to be called once per handler\n # avoids issues if an error is raised,\n # since this may be called again when trying to render the error page\n if hasattr(self, '_cached_user'):\n return self._cached_user\n \n self._cached_user = None\n my_user = self.settings['user']\n encrypted_cookie = self.get_cookie(self.cookie_name)\n if encrypted_cookie:\n auth_data = JupyterHubLoginHandler.verify_token(self, self.cookie_name, encrypted_cookie)\n if not auth_data:\n # treat invalid token the same as no token\n return None\n user = auth_data['user']\n if user == my_user:\n self._cached_user = user\n return user\n else:\n return None\n else:\n self.log.debug(\"No token cookie\")\n return None\n\n\nclass JupyterHubLogoutHandler(LogoutHandler):\n def get(self):\n self.redirect(url_path_join(self.settings['hub_prefix'], 'logout'))\n\n\n# register new hub related command-line 
aliases\naliases = NotebookApp.aliases.get_default_value()\naliases.update({\n 'user' : 'SingleUserNotebookApp.user',\n 'cookie-name': 'SingleUserNotebookApp.cookie_name',\n 'hub-prefix': 'SingleUserNotebookApp.hub_prefix',\n 'hub-api-url': 'SingleUserNotebookApp.hub_api_url',\n 'base-url': 'SingleUserNotebookApp.base_url',\n})\n\nclass SingleUserNotebookApp(NotebookApp):\n \"\"\"A Subclass of the regular NotebookApp that is aware of the parent multiuser context.\"\"\"\n user = Unicode(config=True)\n def _user_changed(self, name, old, new):\n self.log.name = new\n cookie_name = Unicode(config=True)\n hub_prefix = Unicode(config=True)\n hub_api_url = Unicode(config=True)\n aliases = aliases\n open_browser = False\n login_handler_class = JupyterHubLoginHandler\n logout_handler_class = JupyterHubLogoutHandler\n\n cookie_cache_lifetime = Integer(\n config=True,\n default_value=300,\n allow_none=True,\n help=\"\"\"\n Time, in seconds, that we cache a validated cookie before requiring\n revalidation with the hub.\n \"\"\",\n )\n\n def _log_datefmt_default(self):\n \"\"\"Exclude date from default date format\"\"\"\n return \"%Y-%m-%d %H:%M:%S\"\n\n def _log_format_default(self):\n \"\"\"override default log format to include time\"\"\"\n return \"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s %(module)s:%(lineno)d]%(end_color)s %(message)s\"\n\n def _confirm_exit(self):\n # disable the exit confirmation for background notebook processes\n ioloop.IOLoop.instance().stop()\n\n def _clear_cookie_cache(self):\n self.log.info(\"Clearing cookie cache\")\n self.tornado_settings['cookie_cache'].clear()\n\n def initialize(self, argv=None):\n super().initialize(argv=argv)\n\n # Start a PeriodicCallback to clear cached cookies. This forces us to\n # revalidate our user with the Hub at least every\n # `cookie_cache_lifetime` seconds.\n if self.cookie_cache_lifetime:\n ioloop.PeriodicCallback(\n self._clear_cookie_cache,\n self.cookie_cache_lifetime * 1e3,\n ).start()\n\n def init_webapp(self):\n # load the hub related settings into the tornado settings dict\n env = os.environ\n s = self.tornado_settings\n s['cookie_cache'] = {}\n s['user'] = self.user\n s['hub_api_key'] = env.pop('JPY_API_TOKEN')\n s['hub_prefix'] = self.hub_prefix\n s['cookie_name'] = self.cookie_name\n s['login_url'] = url_path_join(self.hub_prefix, 'login')\n s['hub_api_url'] = self.hub_api_url\n super(SingleUserNotebookApp, self).init_webapp()\n\n\ndef main():\n return SingleUserNotebookApp.launch_instance()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "jupyterhub/singleuser.py"}], "after_files": [{"content": "#!/usr/bin/env python\n\"\"\"Extend regular notebook server to be aware of multiuser things.\"\"\"\n\n# Copyright (c) Jupyter Development Team.\n# Distributed under the terms of the Modified BSD License.\n\nimport os\nfrom urllib.parse import quote\n\nimport requests\n\nfrom tornado import ioloop\nfrom tornado.web import HTTPError\n\nfrom IPython.utils.traitlets import (\n Integer,\n Unicode,\n CUnicode,\n)\n\nfrom IPython.html.notebookapp import NotebookApp\nfrom IPython.html.auth.login import LoginHandler\nfrom IPython.html.auth.logout import LogoutHandler\n\nfrom IPython.html.utils import url_path_join\n\n\nfrom distutils.version import LooseVersion as V\n\nimport IPython\nif V(IPython.__version__) < V('3.0'):\n raise ImportError(\"JupyterHub Requires IPython >= 3.0, found %s\" % IPython.__version__)\n\n# Define two methods to attach to AuthenticatedHandler,\n# which authenticate via the central auth 
server.\n\nclass JupyterHubLoginHandler(LoginHandler):\n @staticmethod\n def login_available(settings):\n return True\n \n @staticmethod\n def verify_token(self, cookie_name, encrypted_cookie):\n \"\"\"method for token verification\"\"\"\n cookie_cache = self.settings['cookie_cache']\n if encrypted_cookie in cookie_cache:\n # we've seen this token before, don't ask upstream again\n return cookie_cache[encrypted_cookie]\n \n hub_api_url = self.settings['hub_api_url']\n hub_api_key = self.settings['hub_api_key']\n r = requests.get(url_path_join(\n hub_api_url, \"authorizations/cookie\", cookie_name, quote(encrypted_cookie, safe=''),\n ),\n headers = {'Authorization' : 'token %s' % hub_api_key},\n )\n if r.status_code == 404:\n data = None\n elif r.status_code == 403:\n self.log.error(\"I don't have permission to verify cookies, my auth token may have expired: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Permission failure checking authorization, I may need to be restarted\")\n elif r.status_code >= 500:\n self.log.error(\"Upstream failure verifying auth token: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(502, \"Failed to check authorization (upstream problem)\")\n elif r.status_code >= 400:\n self.log.warn(\"Failed to check authorization: [%i] %s\", r.status_code, r.reason)\n raise HTTPError(500, \"Failed to check authorization\")\n else:\n data = r.json()\n cookie_cache[encrypted_cookie] = data\n return data\n \n @staticmethod\n def get_user(self):\n \"\"\"alternative get_current_user to query the central server\"\"\"\n # only allow this to be called once per handler\n # avoids issues if an error is raised,\n # since this may be called again when trying to render the error page\n if hasattr(self, '_cached_user'):\n return self._cached_user\n \n self._cached_user = None\n my_user = self.settings['user']\n encrypted_cookie = self.get_cookie(self.cookie_name)\n if encrypted_cookie:\n auth_data = JupyterHubLoginHandler.verify_token(self, self.cookie_name, encrypted_cookie)\n if not auth_data:\n # treat invalid token the same as no token\n return None\n user = auth_data['user']\n if user == my_user:\n self._cached_user = user\n return user\n else:\n return None\n else:\n self.log.debug(\"No token cookie\")\n return None\n\n\nclass JupyterHubLogoutHandler(LogoutHandler):\n def get(self):\n self.redirect(url_path_join(self.settings['hub_prefix'], 'logout'))\n\n\n# register new hub related command-line aliases\naliases = NotebookApp.aliases.get_default_value()\naliases.update({\n 'user' : 'SingleUserNotebookApp.user',\n 'cookie-name': 'SingleUserNotebookApp.cookie_name',\n 'hub-prefix': 'SingleUserNotebookApp.hub_prefix',\n 'hub-api-url': 'SingleUserNotebookApp.hub_api_url',\n 'base-url': 'SingleUserNotebookApp.base_url',\n})\n\nclass SingleUserNotebookApp(NotebookApp):\n \"\"\"A Subclass of the regular NotebookApp that is aware of the parent multiuser context.\"\"\"\n user = CUnicode(config=True)\n def _user_changed(self, name, old, new):\n self.log.name = new\n cookie_name = Unicode(config=True)\n hub_prefix = Unicode(config=True)\n hub_api_url = Unicode(config=True)\n aliases = aliases\n open_browser = False\n login_handler_class = JupyterHubLoginHandler\n logout_handler_class = JupyterHubLogoutHandler\n\n cookie_cache_lifetime = Integer(\n config=True,\n default_value=300,\n allow_none=True,\n help=\"\"\"\n Time, in seconds, that we cache a validated cookie before requiring\n revalidation with the hub.\n \"\"\",\n )\n\n def _log_datefmt_default(self):\n \"\"\"Exclude 
date from default date format\"\"\"\n return \"%Y-%m-%d %H:%M:%S\"\n\n def _log_format_default(self):\n \"\"\"override default log format to include time\"\"\"\n return \"%(color)s[%(levelname)1.1s %(asctime)s.%(msecs).03d %(name)s %(module)s:%(lineno)d]%(end_color)s %(message)s\"\n\n def _confirm_exit(self):\n # disable the exit confirmation for background notebook processes\n ioloop.IOLoop.instance().stop()\n\n def _clear_cookie_cache(self):\n self.log.info(\"Clearing cookie cache\")\n self.tornado_settings['cookie_cache'].clear()\n\n def initialize(self, argv=None):\n super().initialize(argv=argv)\n\n # Start a PeriodicCallback to clear cached cookies. This forces us to\n # revalidate our user with the Hub at least every\n # `cookie_cache_lifetime` seconds.\n if self.cookie_cache_lifetime:\n ioloop.PeriodicCallback(\n self._clear_cookie_cache,\n self.cookie_cache_lifetime * 1e3,\n ).start()\n\n def init_webapp(self):\n # load the hub related settings into the tornado settings dict\n env = os.environ\n s = self.tornado_settings\n s['cookie_cache'] = {}\n s['user'] = self.user\n s['hub_api_key'] = env.pop('JPY_API_TOKEN')\n s['hub_prefix'] = self.hub_prefix\n s['cookie_name'] = self.cookie_name\n s['login_url'] = url_path_join(self.hub_prefix, 'login')\n s['hub_api_url'] = self.hub_api_url\n super(SingleUserNotebookApp, self).init_webapp()\n\n\ndef main():\n return SingleUserNotebookApp.launch_instance()\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "jupyterhub/singleuser.py"}]} | 2,580 | 172 |
gh_patches_debug_28593 | rasdani/github-patches | git_diff | Kinto__kinto-827 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Consistency on creation ? 403 versus 412
If we try to create an object that was already created by someone else, the server returns `403 Unauthorized`.
If we add the header `If-None-Match: *`, it also returns `403` even if I have the permission to create new objects (and not `412 Precondition Failed`), which can be confusing.
In the documentation we don't make those corner cases very clear.
Is that consistent, by the way? Keep in mind that we want to prevent Alice from knowing which object ids Bob has created if she has no permission to read them.
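
A minimal sketch of the scenario (the server URL, record path, and credentials are made up; this is just an illustration using the `requests` library, not Kinto's own test code):

```python
import requests

SERVER = "http://localhost:8888/v1"  # hypothetical local Kinto instance
RECORD = SERVER + "/buckets/blog/collections/articles/records/abc-123"

# Bob created this record earlier; Alice now attempts a "safe" creation.
resp = requests.put(
    RECORD,
    json={"data": {"title": "Alice's article"}},
    headers={"If-None-Match": "*"},
    auth=("alice", "alice-password"),
)

# Observed today: 403, even when Alice has the create permission.
# Expected for a failed precondition: 412 Precondition Failed.
print(resp.status_code)
```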
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/core/authorization.py`
Content:
```
1 import functools
2
3 from pyramid.settings import aslist
4 from pyramid.security import IAuthorizationPolicy, Authenticated
5 from zope.interface import implementer
6
7 from kinto.core import utils
8 from kinto.core.storage import exceptions as storage_exceptions
9 from kinto.core.authentication import prefixed_userid
10
11 # A permission is called "dynamic" when it's computed at request time.
12 DYNAMIC = 'dynamic'
13
14 # When permission is set to "private", only the current user is allowed.
15 PRIVATE = 'private'
16
17
18 def groupfinder(userid, request):
19 """Fetch principals from permission backend for the specified `userid`.
20
21 This is plugged by default using the ``multiauth.groupfinder`` setting.
22 """
23 backend = getattr(request.registry, 'permission', None)
24 # Permission backend not configured. Ignore.
25 if not backend:
26 return []
27
28 # Safety check when Kinto-Core is used without pyramid_multiauth.
29 if request.prefixed_userid:
30 userid = request.prefixed_userid
31
32 # Query the permission backend only once per request (e.g. batch).
33 reify_key = userid + '_principals'
34 if reify_key not in request.bound_data:
35 principals = backend.get_user_principals(userid)
36 request.bound_data[reify_key] = principals
37
38 return request.bound_data[reify_key]
39
40
41 @implementer(IAuthorizationPolicy)
42 class AuthorizationPolicy(object):
43 """Default authorization class, that leverages the permission backend
44 for shareable resources.
45 """
46
47 get_bound_permissions = None
48 """Callable that takes an object id and a permission and returns
49 a list of tuples (<object id>, <permission>). Useful when objects
50 permission depend on others."""
51
52 def permits(self, context, principals, permission):
53 if permission == PRIVATE:
54 return Authenticated in principals
55
56 # Add prefixed user id to principals.
57 prefixed_userid = context.get_prefixed_userid()
58 if prefixed_userid and ':' in prefixed_userid:
59 principals = principals + [prefixed_userid]
60 prefix, user_id = prefixed_userid.split(':', 1)
61 # Remove unprefixed user id to avoid conflicts.
62 # (it is added via Pyramid Authn policy effective principals)
63 if user_id in principals:
64 principals.remove(user_id)
65 # Retro-compatibility with cliquet 2.0 '_' user id prefixes.
66 # Just in case it was used in permissions definitions.
67 principals.append('%s_%s' % (prefix, user_id))
68
69 if permission == DYNAMIC:
70 permission = context.required_permission
71
72 if permission == 'create':
73 permission = '%s:%s' % (context.resource_name, permission)
74
75 if context.allowed_principals:
76 allowed = bool(set(context.allowed_principals) & set(principals))
77 else:
78 object_id = context.permission_object_id
79 if self.get_bound_permissions is None:
80 bound_perms = [(object_id, permission)]
81 else:
82 bound_perms = self.get_bound_permissions(object_id, permission)
83 allowed = context.check_permission(principals, bound_perms)
84
85 # If not allowed on this collection, but some records are shared with
86 # the current user, then authorize.
87 # The ShareableResource class will take care of the filtering.
88 is_list_operation = (context.on_collection and
89 not permission.endswith('create'))
90 if not allowed and is_list_operation:
91 shared = context.fetch_shared_records(permission,
92 principals,
93 self.get_bound_permissions)
94 allowed = shared is not None
95
96 return allowed
97
98 def principals_allowed_by_permission(self, context, permission):
99 raise NotImplementedError() # PRAGMA NOCOVER
100
101
102 class RouteFactory(object):
103 resource_name = None
104 on_collection = False
105 required_permission = None
106 allowed_principals = None
107 permission_object_id = None
108 current_record = None
109 shared_ids = None
110
111 method_permissions = {
112 "head": "read",
113 "get": "read",
114 "post": "create",
115 "delete": "write",
116 "patch": "write"
117 }
118
119 def __init__(self, request):
120 # Make it available for the authorization policy.
121 self.get_prefixed_userid = functools.partial(prefixed_userid, request)
122
123 # Store some shortcuts.
124 permission = request.registry.permission
125 self.check_permission = permission.check_permission
126 self._get_accessible_objects = permission.get_accessible_objects
127
128 # Store current resource and required permission.
129 service = utils.current_service(request)
130 is_on_resource = (service is not None and
131 hasattr(service, 'viewset') and
132 hasattr(service, 'resource'))
133 if is_on_resource:
134 self.resource_name = request.current_resource_name
135 self.on_collection = getattr(service, "type", None) == "collection"
136
137 self.permission_object_id, self.required_permission = (
138 self._find_required_permission(request, service))
139
140 # To obtain shared records on a collection endpoint, use a match:
141 self._object_id_match = self.get_permission_object_id(request, '*')
142
143 # Check if principals are allowed explicitly from settings.
144 settings = request.registry.settings
145 setting = '%s_%s_principals' % (self.resource_name,
146 self.required_permission)
147 self.allowed_principals = aslist(settings.get(setting, ''))
148
149 def fetch_shared_records(self, perm, principals, get_bound_permissions):
150 """Fetch records that are readable or writable for the current
151 principals.
152
153 See :meth:`kinto.core.authorization.AuthorizationPolicy.permits`
154
155 If no record is shared, it returns None.
156
157 .. warning::
158 This sets the ``shared_ids`` attribute to the context with the
159 return value. The attribute is then read by
160 :class:`kinto.core.resource.ShareableResource`
161 """
162 if get_bound_permissions:
163 bound_perms = get_bound_permissions(self._object_id_match, perm)
164 else:
165 bound_perms = [(self._object_id_match, perm)]
166 by_obj_id = self._get_accessible_objects(principals, bound_perms)
167 ids = by_obj_id.keys()
168 if len(ids) > 0:
169 # Store for later use in ``ShareableResource``.
170 self.shared_ids = [self._extract_object_id(id_) for id_ in ids]
171 else:
172 self.shared_ids = None
173
174 return self.shared_ids
175
176 def get_permission_object_id(self, request, object_id=None):
177 """Returns the permission object id for the current request.
178 In the nominal case, it is just the current URI without version prefix.
179 For collections, it is the related record URI using the specified
180 `object_id`.
181
182 See :meth:`kinto.core.resource.model.SharableModel` and
183 :meth:`kinto.core.authorization.RouteFactory.__init__`
184 """
185 object_uri = utils.strip_uri_prefix(request.path)
186
187 if self.on_collection and object_id is not None:
188 # With the current request on a collection, the record URI must
189 # be found out by inspecting the collection service and its sibling
190 # record service.
191 matchdict = request.matchdict.copy()
192 matchdict['id'] = object_id
193 try:
194 object_uri = utils.instance_uri(request,
195 self.resource_name,
196 **matchdict)
197 if object_id == '*':
198 object_uri = object_uri.replace('%2A', '*')
199 except KeyError:
200 # Maybe the resource has no single record endpoint.
201 # We consider that object URIs in permissions backend will
202 # be stored naively:
203 object_uri = object_uri + '/' + object_id
204
205 return object_uri
206
207 def _extract_object_id(self, object_uri):
208 # XXX: Rewrite using kinto.core.utils.view_lookup() and matchdict['id']
209 return object_uri.split('/')[-1]
210
211 def _find_required_permission(self, request, service):
212 """Find out what is the permission object id and the required
213 permission.
214
215 .. note::
216 This method saves an attribute ``self.current_record`` used
217 in :class:`kinto.core.resource.UserResource`.
218 """
219 # By default, it's a URI a and permission associated to the method.
220 permission_object_id = self.get_permission_object_id(request)
221 method = request.method.lower()
222 required_permission = self.method_permissions.get(method)
223
224 # In the case of a "PUT", check if the targetted record already
225 # exists, return "write" if it does, "create" otherwise.
226 if request.method.lower() == "put":
227 resource = service.resource(request=request, context=self)
228 try:
229 record = resource.model.get_record(resource.record_id)
230 # Save a reference, to avoid refetching from storage in
231 # resource.
232 self.current_record = record
233 except storage_exceptions.RecordNotFoundError:
234 # The record does not exist, the permission to create on
235 # the related collection is required.
236 permission_object_id = service.collection_path.format(
237 **request.matchdict)
238 required_permission = "create"
239 else:
240 required_permission = "write"
241
242 return (permission_object_id, required_permission)
243
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/kinto/core/authorization.py b/kinto/core/authorization.py
--- a/kinto/core/authorization.py
+++ b/kinto/core/authorization.py
@@ -221,6 +221,9 @@
method = request.method.lower()
required_permission = self.method_permissions.get(method)
+ # For create permission, the object id is the plural endpoint.
+ collection_path = service.collection_path.format(**request.matchdict)
+
# In the case of a "PUT", check if the targetted record already
# exists, return "write" if it does, "create" otherwise.
if request.method.lower() == "put":
@@ -233,10 +236,15 @@
except storage_exceptions.RecordNotFoundError:
# The record does not exist, the permission to create on
# the related collection is required.
- permission_object_id = service.collection_path.format(
- **request.matchdict)
+ permission_object_id = collection_path
required_permission = "create"
else:
- required_permission = "write"
+ # For safe creations, the user needs a create permission.
+ # See Kinto/kinto#792
+ if request.headers.get('If-None-Match') == '*':
+ permission_object_id = collection_path
+ required_permission = "create"
+ else:
+ required_permission = "write"
return (permission_object_id, required_permission)
| {"golden_diff": "diff --git a/kinto/core/authorization.py b/kinto/core/authorization.py\n--- a/kinto/core/authorization.py\n+++ b/kinto/core/authorization.py\n@@ -221,6 +221,9 @@\n method = request.method.lower()\n required_permission = self.method_permissions.get(method)\n \n+ # For create permission, the object id is the plural endpoint.\n+ collection_path = service.collection_path.format(**request.matchdict)\n+\n # In the case of a \"PUT\", check if the targetted record already\n # exists, return \"write\" if it does, \"create\" otherwise.\n if request.method.lower() == \"put\":\n@@ -233,10 +236,15 @@\n except storage_exceptions.RecordNotFoundError:\n # The record does not exist, the permission to create on\n # the related collection is required.\n- permission_object_id = service.collection_path.format(\n- **request.matchdict)\n+ permission_object_id = collection_path\n required_permission = \"create\"\n else:\n- required_permission = \"write\"\n+ # For safe creations, the user needs a create permission.\n+ # See Kinto/kinto#792\n+ if request.headers.get('If-None-Match') == '*':\n+ permission_object_id = collection_path\n+ required_permission = \"create\"\n+ else:\n+ required_permission = \"write\"\n \n return (permission_object_id, required_permission)\n", "issue": "Consistency on creation ? 403 versus 412\nIf we try to create an object that was already created by someone else, the server returns `403 Unauthorized`.\n\nIf we add the header `If-None-Match: *`, it also returns `403` even I have the permission to create new objects (and not `412 Precondition failed`), which can be confusing.\nIn the documentation we don't make those corner cases very clear.\n\nIs that consistent by the way ? Knowing that we want to prevent Alice to know what objects ids Bob has created if she has no permission to read them.\n\nConsistency on creation ? 403 versus 412\nIf we try to create an object that was already created by someone else, the server returns `403 Unauthorized`.\n\nIf we add the header `If-None-Match: *`, it also returns `403` even I have the permission to create new objects (and not `412 Precondition failed`), which can be confusing.\nIn the documentation we don't make those corner cases very clear.\n\nIs that consistent by the way ? Knowing that we want to prevent Alice to know what objects ids Bob has created if she has no permission to read them.\n\n", "before_files": [{"content": "import functools\n\nfrom pyramid.settings import aslist\nfrom pyramid.security import IAuthorizationPolicy, Authenticated\nfrom zope.interface import implementer\n\nfrom kinto.core import utils\nfrom kinto.core.storage import exceptions as storage_exceptions\nfrom kinto.core.authentication import prefixed_userid\n\n# A permission is called \"dynamic\" when it's computed at request time.\nDYNAMIC = 'dynamic'\n\n# When permission is set to \"private\", only the current user is allowed.\nPRIVATE = 'private'\n\n\ndef groupfinder(userid, request):\n \"\"\"Fetch principals from permission backend for the specified `userid`.\n\n This is plugged by default using the ``multiauth.groupfinder`` setting.\n \"\"\"\n backend = getattr(request.registry, 'permission', None)\n # Permission backend not configured. Ignore.\n if not backend:\n return []\n\n # Safety check when Kinto-Core is used without pyramid_multiauth.\n if request.prefixed_userid:\n userid = request.prefixed_userid\n\n # Query the permission backend only once per request (e.g. 
batch).\n reify_key = userid + '_principals'\n if reify_key not in request.bound_data:\n principals = backend.get_user_principals(userid)\n request.bound_data[reify_key] = principals\n\n return request.bound_data[reify_key]\n\n\n@implementer(IAuthorizationPolicy)\nclass AuthorizationPolicy(object):\n \"\"\"Default authorization class, that leverages the permission backend\n for shareable resources.\n \"\"\"\n\n get_bound_permissions = None\n \"\"\"Callable that takes an object id and a permission and returns\n a list of tuples (<object id>, <permission>). Useful when objects\n permission depend on others.\"\"\"\n\n def permits(self, context, principals, permission):\n if permission == PRIVATE:\n return Authenticated in principals\n\n # Add prefixed user id to principals.\n prefixed_userid = context.get_prefixed_userid()\n if prefixed_userid and ':' in prefixed_userid:\n principals = principals + [prefixed_userid]\n prefix, user_id = prefixed_userid.split(':', 1)\n # Remove unprefixed user id to avoid conflicts.\n # (it is added via Pyramid Authn policy effective principals)\n if user_id in principals:\n principals.remove(user_id)\n # Retro-compatibility with cliquet 2.0 '_' user id prefixes.\n # Just in case it was used in permissions definitions.\n principals.append('%s_%s' % (prefix, user_id))\n\n if permission == DYNAMIC:\n permission = context.required_permission\n\n if permission == 'create':\n permission = '%s:%s' % (context.resource_name, permission)\n\n if context.allowed_principals:\n allowed = bool(set(context.allowed_principals) & set(principals))\n else:\n object_id = context.permission_object_id\n if self.get_bound_permissions is None:\n bound_perms = [(object_id, permission)]\n else:\n bound_perms = self.get_bound_permissions(object_id, permission)\n allowed = context.check_permission(principals, bound_perms)\n\n # If not allowed on this collection, but some records are shared with\n # the current user, then authorize.\n # The ShareableResource class will take care of the filtering.\n is_list_operation = (context.on_collection and\n not permission.endswith('create'))\n if not allowed and is_list_operation:\n shared = context.fetch_shared_records(permission,\n principals,\n self.get_bound_permissions)\n allowed = shared is not None\n\n return allowed\n\n def principals_allowed_by_permission(self, context, permission):\n raise NotImplementedError() # PRAGMA NOCOVER\n\n\nclass RouteFactory(object):\n resource_name = None\n on_collection = False\n required_permission = None\n allowed_principals = None\n permission_object_id = None\n current_record = None\n shared_ids = None\n\n method_permissions = {\n \"head\": \"read\",\n \"get\": \"read\",\n \"post\": \"create\",\n \"delete\": \"write\",\n \"patch\": \"write\"\n }\n\n def __init__(self, request):\n # Make it available for the authorization policy.\n self.get_prefixed_userid = functools.partial(prefixed_userid, request)\n\n # Store some shortcuts.\n permission = request.registry.permission\n self.check_permission = permission.check_permission\n self._get_accessible_objects = permission.get_accessible_objects\n\n # Store current resource and required permission.\n service = utils.current_service(request)\n is_on_resource = (service is not None and\n hasattr(service, 'viewset') and\n hasattr(service, 'resource'))\n if is_on_resource:\n self.resource_name = request.current_resource_name\n self.on_collection = getattr(service, \"type\", None) == \"collection\"\n\n self.permission_object_id, self.required_permission = (\n 
self._find_required_permission(request, service))\n\n # To obtain shared records on a collection endpoint, use a match:\n self._object_id_match = self.get_permission_object_id(request, '*')\n\n # Check if principals are allowed explicitly from settings.\n settings = request.registry.settings\n setting = '%s_%s_principals' % (self.resource_name,\n self.required_permission)\n self.allowed_principals = aslist(settings.get(setting, ''))\n\n def fetch_shared_records(self, perm, principals, get_bound_permissions):\n \"\"\"Fetch records that are readable or writable for the current\n principals.\n\n See :meth:`kinto.core.authorization.AuthorizationPolicy.permits`\n\n If no record is shared, it returns None.\n\n .. warning::\n This sets the ``shared_ids`` attribute to the context with the\n return value. The attribute is then read by\n :class:`kinto.core.resource.ShareableResource`\n \"\"\"\n if get_bound_permissions:\n bound_perms = get_bound_permissions(self._object_id_match, perm)\n else:\n bound_perms = [(self._object_id_match, perm)]\n by_obj_id = self._get_accessible_objects(principals, bound_perms)\n ids = by_obj_id.keys()\n if len(ids) > 0:\n # Store for later use in ``ShareableResource``.\n self.shared_ids = [self._extract_object_id(id_) for id_ in ids]\n else:\n self.shared_ids = None\n\n return self.shared_ids\n\n def get_permission_object_id(self, request, object_id=None):\n \"\"\"Returns the permission object id for the current request.\n In the nominal case, it is just the current URI without version prefix.\n For collections, it is the related record URI using the specified\n `object_id`.\n\n See :meth:`kinto.core.resource.model.SharableModel` and\n :meth:`kinto.core.authorization.RouteFactory.__init__`\n \"\"\"\n object_uri = utils.strip_uri_prefix(request.path)\n\n if self.on_collection and object_id is not None:\n # With the current request on a collection, the record URI must\n # be found out by inspecting the collection service and its sibling\n # record service.\n matchdict = request.matchdict.copy()\n matchdict['id'] = object_id\n try:\n object_uri = utils.instance_uri(request,\n self.resource_name,\n **matchdict)\n if object_id == '*':\n object_uri = object_uri.replace('%2A', '*')\n except KeyError:\n # Maybe the resource has no single record endpoint.\n # We consider that object URIs in permissions backend will\n # be stored naively:\n object_uri = object_uri + '/' + object_id\n\n return object_uri\n\n def _extract_object_id(self, object_uri):\n # XXX: Rewrite using kinto.core.utils.view_lookup() and matchdict['id']\n return object_uri.split('/')[-1]\n\n def _find_required_permission(self, request, service):\n \"\"\"Find out what is the permission object id and the required\n permission.\n\n .. 
note::\n This method saves an attribute ``self.current_record`` used\n in :class:`kinto.core.resource.UserResource`.\n \"\"\"\n # By default, it's a URI a and permission associated to the method.\n permission_object_id = self.get_permission_object_id(request)\n method = request.method.lower()\n required_permission = self.method_permissions.get(method)\n\n # In the case of a \"PUT\", check if the targetted record already\n # exists, return \"write\" if it does, \"create\" otherwise.\n if request.method.lower() == \"put\":\n resource = service.resource(request=request, context=self)\n try:\n record = resource.model.get_record(resource.record_id)\n # Save a reference, to avoid refetching from storage in\n # resource.\n self.current_record = record\n except storage_exceptions.RecordNotFoundError:\n # The record does not exist, the permission to create on\n # the related collection is required.\n permission_object_id = service.collection_path.format(\n **request.matchdict)\n required_permission = \"create\"\n else:\n required_permission = \"write\"\n\n return (permission_object_id, required_permission)\n", "path": "kinto/core/authorization.py"}], "after_files": [{"content": "import functools\n\nfrom pyramid.settings import aslist\nfrom pyramid.security import IAuthorizationPolicy, Authenticated\nfrom zope.interface import implementer\n\nfrom kinto.core import utils\nfrom kinto.core.storage import exceptions as storage_exceptions\nfrom kinto.core.authentication import prefixed_userid\n\n# A permission is called \"dynamic\" when it's computed at request time.\nDYNAMIC = 'dynamic'\n\n# When permission is set to \"private\", only the current user is allowed.\nPRIVATE = 'private'\n\n\ndef groupfinder(userid, request):\n \"\"\"Fetch principals from permission backend for the specified `userid`.\n\n This is plugged by default using the ``multiauth.groupfinder`` setting.\n \"\"\"\n backend = getattr(request.registry, 'permission', None)\n # Permission backend not configured. Ignore.\n if not backend:\n return []\n\n # Safety check when Kinto-Core is used without pyramid_multiauth.\n if request.prefixed_userid:\n userid = request.prefixed_userid\n\n # Query the permission backend only once per request (e.g. batch).\n reify_key = userid + '_principals'\n if reify_key not in request.bound_data:\n principals = backend.get_user_principals(userid)\n request.bound_data[reify_key] = principals\n\n return request.bound_data[reify_key]\n\n\n@implementer(IAuthorizationPolicy)\nclass AuthorizationPolicy(object):\n \"\"\"Default authorization class, that leverages the permission backend\n for shareable resources.\n \"\"\"\n\n get_bound_permissions = None\n \"\"\"Callable that takes an object id and a permission and returns\n a list of tuples (<object id>, <permission>). 
Useful when objects\n permission depend on others.\"\"\"\n\n def permits(self, context, principals, permission):\n if permission == PRIVATE:\n return Authenticated in principals\n\n # Add prefixed user id to principals.\n prefixed_userid = context.get_prefixed_userid()\n if prefixed_userid and ':' in prefixed_userid:\n principals = principals + [prefixed_userid]\n prefix, user_id = prefixed_userid.split(':', 1)\n # Remove unprefixed user id to avoid conflicts.\n # (it is added via Pyramid Authn policy effective principals)\n if user_id in principals:\n principals.remove(user_id)\n # Retro-compatibility with cliquet 2.0 '_' user id prefixes.\n # Just in case it was used in permissions definitions.\n principals.append('%s_%s' % (prefix, user_id))\n\n if permission == DYNAMIC:\n permission = context.required_permission\n\n if permission == 'create':\n permission = '%s:%s' % (context.resource_name, permission)\n\n if context.allowed_principals:\n allowed = bool(set(context.allowed_principals) & set(principals))\n else:\n object_id = context.permission_object_id\n if self.get_bound_permissions is None:\n bound_perms = [(object_id, permission)]\n else:\n bound_perms = self.get_bound_permissions(object_id, permission)\n allowed = context.check_permission(principals, bound_perms)\n\n # If not allowed on this collection, but some records are shared with\n # the current user, then authorize.\n # The ShareableResource class will take care of the filtering.\n is_list_operation = (context.on_collection and\n not permission.endswith('create'))\n if not allowed and is_list_operation:\n shared = context.fetch_shared_records(permission,\n principals,\n self.get_bound_permissions)\n allowed = shared is not None\n\n return allowed\n\n def principals_allowed_by_permission(self, context, permission):\n raise NotImplementedError() # PRAGMA NOCOVER\n\n\nclass RouteFactory(object):\n resource_name = None\n on_collection = False\n required_permission = None\n allowed_principals = None\n permission_object_id = None\n current_record = None\n shared_ids = None\n\n method_permissions = {\n \"head\": \"read\",\n \"get\": \"read\",\n \"post\": \"create\",\n \"delete\": \"write\",\n \"patch\": \"write\"\n }\n\n def __init__(self, request):\n # Make it available for the authorization policy.\n self.get_prefixed_userid = functools.partial(prefixed_userid, request)\n\n # Store some shortcuts.\n permission = request.registry.permission\n self.check_permission = permission.check_permission\n self._get_accessible_objects = permission.get_accessible_objects\n\n # Store current resource and required permission.\n service = utils.current_service(request)\n is_on_resource = (service is not None and\n hasattr(service, 'viewset') and\n hasattr(service, 'resource'))\n if is_on_resource:\n self.resource_name = request.current_resource_name\n self.on_collection = getattr(service, \"type\", None) == \"collection\"\n\n self.permission_object_id, self.required_permission = (\n self._find_required_permission(request, service))\n\n # To obtain shared records on a collection endpoint, use a match:\n self._object_id_match = self.get_permission_object_id(request, '*')\n\n # Check if principals are allowed explicitly from settings.\n settings = request.registry.settings\n setting = '%s_%s_principals' % (self.resource_name,\n self.required_permission)\n self.allowed_principals = aslist(settings.get(setting, ''))\n\n def fetch_shared_records(self, perm, principals, get_bound_permissions):\n \"\"\"Fetch records that are readable or writable for 
the current\n principals.\n\n See :meth:`kinto.core.authorization.AuthorizationPolicy.permits`\n\n If no record is shared, it returns None.\n\n .. warning::\n This sets the ``shared_ids`` attribute to the context with the\n return value. The attribute is then read by\n :class:`kinto.core.resource.ShareableResource`\n \"\"\"\n if get_bound_permissions:\n bound_perms = get_bound_permissions(self._object_id_match, perm)\n else:\n bound_perms = [(self._object_id_match, perm)]\n by_obj_id = self._get_accessible_objects(principals, bound_perms)\n ids = by_obj_id.keys()\n if len(ids) > 0:\n # Store for later use in ``ShareableResource``.\n self.shared_ids = [self._extract_object_id(id_) for id_ in ids]\n else:\n self.shared_ids = None\n\n return self.shared_ids\n\n def get_permission_object_id(self, request, object_id=None):\n \"\"\"Returns the permission object id for the current request.\n In the nominal case, it is just the current URI without version prefix.\n For collections, it is the related record URI using the specified\n `object_id`.\n\n See :meth:`kinto.core.resource.model.SharableModel` and\n :meth:`kinto.core.authorization.RouteFactory.__init__`\n \"\"\"\n object_uri = utils.strip_uri_prefix(request.path)\n\n if self.on_collection and object_id is not None:\n # With the current request on a collection, the record URI must\n # be found out by inspecting the collection service and its sibling\n # record service.\n matchdict = request.matchdict.copy()\n matchdict['id'] = object_id\n try:\n object_uri = utils.instance_uri(request,\n self.resource_name,\n **matchdict)\n if object_id == '*':\n object_uri = object_uri.replace('%2A', '*')\n except KeyError:\n # Maybe the resource has no single record endpoint.\n # We consider that object URIs in permissions backend will\n # be stored naively:\n object_uri = object_uri + '/' + object_id\n\n return object_uri\n\n def _extract_object_id(self, object_uri):\n # XXX: Rewrite using kinto.core.utils.view_lookup() and matchdict['id']\n return object_uri.split('/')[-1]\n\n def _find_required_permission(self, request, service):\n \"\"\"Find out what is the permission object id and the required\n permission.\n\n .. 
note::\n This method saves an attribute ``self.current_record`` used\n in :class:`kinto.core.resource.UserResource`.\n \"\"\"\n # By default, it's a URI a and permission associated to the method.\n permission_object_id = self.get_permission_object_id(request)\n method = request.method.lower()\n required_permission = self.method_permissions.get(method)\n\n # For create permission, the object id is the plural endpoint.\n collection_path = service.collection_path.format(**request.matchdict)\n\n # In the case of a \"PUT\", check if the targetted record already\n # exists, return \"write\" if it does, \"create\" otherwise.\n if request.method.lower() == \"put\":\n resource = service.resource(request=request, context=self)\n try:\n record = resource.model.get_record(resource.record_id)\n # Save a reference, to avoid refetching from storage in\n # resource.\n self.current_record = record\n except storage_exceptions.RecordNotFoundError:\n # The record does not exist, the permission to create on\n # the related collection is required.\n permission_object_id = collection_path\n required_permission = \"create\"\n else:\n # For safe creations, the user needs a create permission.\n # See Kinto/kinto#792\n if request.headers.get('If-None-Match') == '*':\n permission_object_id = collection_path\n required_permission = \"create\"\n else:\n required_permission = \"write\"\n\n return (permission_object_id, required_permission)\n", "path": "kinto/core/authorization.py"}]} | 3,081 | 316 |
gh_patches_debug_30023 | rasdani/github-patches | git_diff | scalableminds__webknossos-libs-1083 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Webknossos CLI download annotation
## Context
- Affected library: webknossos
When using the webknossos CLI to download an annotation, the `download` command is overloaded to handle both dataset downloads and annotation downloads. The annotation is supposed to be downloaded when no dataset is found, which is signalled by an `AssertionError`. Currently, however, no assertion is thrown; an `UnexpectedStatusError` is raised instead, so the fallback to the annotation download never happens.
## Expected Behavior
When a URL to an Annotation is given, the Annotation should be downloaded.
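
The patch further down replaces the try/except with explicit URL matching (using the library's internal `_DATASET_URL_REGEX` and `_ANNOTATION_URL_REGEX`). A rough, self-contained sketch of that dispatch idea, with placeholder patterns rather than the real ones, looks like this:

```python
import re

# Placeholder patterns; the real library uses _DATASET_URL_REGEX and
# _ANNOTATION_URL_REGEX, whose exact contents are not shown here.
DATASET_URL = re.compile(r"/datasets/")
ANNOTATION_URL = re.compile(r"/annotations/")

def resolve_download_kind(url: str) -> str:
    """Decide what to download from the URL instead of catching an error."""
    if DATASET_URL.search(url):
        return "dataset"
    if ANNOTATION_URL.search(url):
        return "annotation"
    raise RuntimeError("The provided URL does not lead to a dataset or annotation.")

print(resolve_download_kind("https://webknossos.org/annotations/12345"))  # annotation
```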
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `webknossos/webknossos/cli/download.py`
Content:
```
1 """This module takes care of downloading WEBKNOSSOS datasets."""
2
3 from typing import Any, List, Optional
4
5 import typer
6 from typing_extensions import Annotated
7
8 from ..annotation import Annotation
9 from ..client import webknossos_context
10 from ..dataset import Dataset
11 from ..geometry import BoundingBox, Mag
12 from ._utils import parse_bbox, parse_mag, parse_path
13
14
15 def main(
16 *,
17 target: Annotated[
18 Any,
19 typer.Argument(
20 show_default=False,
21 help="Path to save your WEBKNOSSOS dataset.",
22 parser=parse_path,
23 ),
24 ],
25 url: Annotated[
26 str,
27 typer.Option(
28 help="URL of your dataset or your annotation.",
29 ),
30 ],
31 token: Annotated[
32 Optional[str],
33 typer.Option(
34 help="Authentication token for WEBKNOSSOS instance "
35 "(https://webknossos.org/auth/token).",
36 rich_help_panel="WEBKNOSSOS context",
37 envvar="WK_TOKEN",
38 ),
39 ] = None,
40 bbox: Annotated[
41 Optional[BoundingBox],
42 typer.Option(
43 rich_help_panel="Partial download",
44 help="Bounding box that should be downloaded. "
45 "The input format is x,y,z,width,height,depth. "
46 "Should be a comma separated string (e.g. 0,0,0,10,10,10).",
47 parser=parse_bbox,
48 metavar="BBOX",
49 ),
50 ] = None,
51 layer: Annotated[
52 Optional[List[str]],
53 typer.Option(
54 rich_help_panel="Partial download",
55 help="Layers that should be downloaded. "
56 "For multiple layers type: --layer color --layer segmentation",
57 ),
58 ] = None,
59 mag: Annotated[
60 Optional[List[Mag]],
61 typer.Option(
62 rich_help_panel="Partial download",
63 help="Mags that should be downloaded. "
64 "Should be number or minus separated string (e.g. 2 or 2-2-2). "
65 "For multiple mags type: --mag 1 --mag 2",
66 parser=parse_mag,
67 metavar="MAG",
68 ),
69 ] = None,
70 ) -> None:
71 """Download a dataset from a WEBKNOSSOS server."""
72
73 layers = layer if layer else None
74 mags = mag if mag else None
75
76 with webknossos_context(token=token):
77 try:
78 Dataset.download(
79 dataset_name_or_url=url,
80 path=target,
81 bbox=bbox,
82 layers=layers,
83 mags=mags,
84 )
85 except AssertionError:
86 Annotation.download(annotation_id_or_url=url).save(target)
87
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/webknossos/webknossos/cli/download.py b/webknossos/webknossos/cli/download.py
--- a/webknossos/webknossos/cli/download.py
+++ b/webknossos/webknossos/cli/download.py
@@ -1,13 +1,15 @@
"""This module takes care of downloading WEBKNOSSOS datasets."""
+import re
from typing import Any, List, Optional
import typer
from typing_extensions import Annotated
-from ..annotation import Annotation
+from ..annotation.annotation import _ANNOTATION_URL_REGEX, Annotation
from ..client import webknossos_context
-from ..dataset import Dataset
+from ..client._resolve_short_link import resolve_short_link
+from ..dataset.dataset import _DATASET_URL_REGEX, Dataset
from ..geometry import BoundingBox, Mag
from ._utils import parse_bbox, parse_mag, parse_path
@@ -72,9 +74,10 @@
layers = layer if layer else None
mags = mag if mag else None
+ url = resolve_short_link(url)
with webknossos_context(token=token):
- try:
+ if re.match(_DATASET_URL_REGEX, url):
Dataset.download(
dataset_name_or_url=url,
path=target,
@@ -82,5 +85,13 @@
layers=layers,
mags=mags,
)
- except AssertionError:
+ elif re.match(_ANNOTATION_URL_REGEX, url):
Annotation.download(annotation_id_or_url=url).save(target)
+ else:
+ raise RuntimeError(
+ "The provided URL does not lead to a dataset or annotation."
+ )
+
+
+if __name__ == "__main__":
+ typer.run(main)
| {"golden_diff": "diff --git a/webknossos/webknossos/cli/download.py b/webknossos/webknossos/cli/download.py\n--- a/webknossos/webknossos/cli/download.py\n+++ b/webknossos/webknossos/cli/download.py\n@@ -1,13 +1,15 @@\n \"\"\"This module takes care of downloading WEBKNOSSOS datasets.\"\"\"\n \n+import re\n from typing import Any, List, Optional\n \n import typer\n from typing_extensions import Annotated\n \n-from ..annotation import Annotation\n+from ..annotation.annotation import _ANNOTATION_URL_REGEX, Annotation\n from ..client import webknossos_context\n-from ..dataset import Dataset\n+from ..client._resolve_short_link import resolve_short_link\n+from ..dataset.dataset import _DATASET_URL_REGEX, Dataset\n from ..geometry import BoundingBox, Mag\n from ._utils import parse_bbox, parse_mag, parse_path\n \n@@ -72,9 +74,10 @@\n \n layers = layer if layer else None\n mags = mag if mag else None\n+ url = resolve_short_link(url)\n \n with webknossos_context(token=token):\n- try:\n+ if re.match(_DATASET_URL_REGEX, url):\n Dataset.download(\n dataset_name_or_url=url,\n path=target,\n@@ -82,5 +85,13 @@\n layers=layers,\n mags=mags,\n )\n- except AssertionError:\n+ elif re.match(_ANNOTATION_URL_REGEX, url):\n Annotation.download(annotation_id_or_url=url).save(target)\n+ else:\n+ raise RuntimeError(\n+ \"The provided URL does not lead to a dataset or annotation.\"\n+ )\n+\n+\n+if __name__ == \"__main__\":\n+ typer.run(main)\n", "issue": "Webknossos CLI download annotation\n## Context\r\n- Affected library: webknossos\r\nWhen using the webknossos CLI to download an annotation, the download command is overloaded to take care of dataset downloads and annotation downloads. An annotation should be downloaded when no dataset is found. This is triggered by an AssertionError. Currently no assertion is thrown. Instead, an UnexpectedStatusError is raised.\r\n\r\n## Expected Behavior\r\nWhen a URL to an Annotation is given, the Annotation should be downloaded.\r\n\r\n\n", "before_files": [{"content": "\"\"\"This module takes care of downloading WEBKNOSSOS datasets.\"\"\"\n\nfrom typing import Any, List, Optional\n\nimport typer\nfrom typing_extensions import Annotated\n\nfrom ..annotation import Annotation\nfrom ..client import webknossos_context\nfrom ..dataset import Dataset\nfrom ..geometry import BoundingBox, Mag\nfrom ._utils import parse_bbox, parse_mag, parse_path\n\n\ndef main(\n *,\n target: Annotated[\n Any,\n typer.Argument(\n show_default=False,\n help=\"Path to save your WEBKNOSSOS dataset.\",\n parser=parse_path,\n ),\n ],\n url: Annotated[\n str,\n typer.Option(\n help=\"URL of your dataset or your annotation.\",\n ),\n ],\n token: Annotated[\n Optional[str],\n typer.Option(\n help=\"Authentication token for WEBKNOSSOS instance \"\n \"(https://webknossos.org/auth/token).\",\n rich_help_panel=\"WEBKNOSSOS context\",\n envvar=\"WK_TOKEN\",\n ),\n ] = None,\n bbox: Annotated[\n Optional[BoundingBox],\n typer.Option(\n rich_help_panel=\"Partial download\",\n help=\"Bounding box that should be downloaded. \"\n \"The input format is x,y,z,width,height,depth. \"\n \"Should be a comma separated string (e.g. 0,0,0,10,10,10).\",\n parser=parse_bbox,\n metavar=\"BBOX\",\n ),\n ] = None,\n layer: Annotated[\n Optional[List[str]],\n typer.Option(\n rich_help_panel=\"Partial download\",\n help=\"Layers that should be downloaded. 
\"\n \"For multiple layers type: --layer color --layer segmentation\",\n ),\n ] = None,\n mag: Annotated[\n Optional[List[Mag]],\n typer.Option(\n rich_help_panel=\"Partial download\",\n help=\"Mags that should be downloaded. \"\n \"Should be number or minus separated string (e.g. 2 or 2-2-2). \"\n \"For multiple mags type: --mag 1 --mag 2\",\n parser=parse_mag,\n metavar=\"MAG\",\n ),\n ] = None,\n) -> None:\n \"\"\"Download a dataset from a WEBKNOSSOS server.\"\"\"\n\n layers = layer if layer else None\n mags = mag if mag else None\n\n with webknossos_context(token=token):\n try:\n Dataset.download(\n dataset_name_or_url=url,\n path=target,\n bbox=bbox,\n layers=layers,\n mags=mags,\n )\n except AssertionError:\n Annotation.download(annotation_id_or_url=url).save(target)\n", "path": "webknossos/webknossos/cli/download.py"}], "after_files": [{"content": "\"\"\"This module takes care of downloading WEBKNOSSOS datasets.\"\"\"\n\nimport re\nfrom typing import Any, List, Optional\n\nimport typer\nfrom typing_extensions import Annotated\n\nfrom ..annotation.annotation import _ANNOTATION_URL_REGEX, Annotation\nfrom ..client import webknossos_context\nfrom ..client._resolve_short_link import resolve_short_link\nfrom ..dataset.dataset import _DATASET_URL_REGEX, Dataset\nfrom ..geometry import BoundingBox, Mag\nfrom ._utils import parse_bbox, parse_mag, parse_path\n\n\ndef main(\n *,\n target: Annotated[\n Any,\n typer.Argument(\n show_default=False,\n help=\"Path to save your WEBKNOSSOS dataset.\",\n parser=parse_path,\n ),\n ],\n url: Annotated[\n str,\n typer.Option(\n help=\"URL of your dataset or your annotation.\",\n ),\n ],\n token: Annotated[\n Optional[str],\n typer.Option(\n help=\"Authentication token for WEBKNOSSOS instance \"\n \"(https://webknossos.org/auth/token).\",\n rich_help_panel=\"WEBKNOSSOS context\",\n envvar=\"WK_TOKEN\",\n ),\n ] = None,\n bbox: Annotated[\n Optional[BoundingBox],\n typer.Option(\n rich_help_panel=\"Partial download\",\n help=\"Bounding box that should be downloaded. \"\n \"The input format is x,y,z,width,height,depth. \"\n \"Should be a comma separated string (e.g. 0,0,0,10,10,10).\",\n parser=parse_bbox,\n metavar=\"BBOX\",\n ),\n ] = None,\n layer: Annotated[\n Optional[List[str]],\n typer.Option(\n rich_help_panel=\"Partial download\",\n help=\"Layers that should be downloaded. \"\n \"For multiple layers type: --layer color --layer segmentation\",\n ),\n ] = None,\n mag: Annotated[\n Optional[List[Mag]],\n typer.Option(\n rich_help_panel=\"Partial download\",\n help=\"Mags that should be downloaded. \"\n \"Should be number or minus separated string (e.g. 2 or 2-2-2). \"\n \"For multiple mags type: --mag 1 --mag 2\",\n parser=parse_mag,\n metavar=\"MAG\",\n ),\n ] = None,\n) -> None:\n \"\"\"Download a dataset from a WEBKNOSSOS server.\"\"\"\n\n layers = layer if layer else None\n mags = mag if mag else None\n url = resolve_short_link(url)\n\n with webknossos_context(token=token):\n if re.match(_DATASET_URL_REGEX, url):\n Dataset.download(\n dataset_name_or_url=url,\n path=target,\n bbox=bbox,\n layers=layers,\n mags=mags,\n )\n elif re.match(_ANNOTATION_URL_REGEX, url):\n Annotation.download(annotation_id_or_url=url).save(target)\n else:\n raise RuntimeError(\n \"The provided URL does not lead to a dataset or annotation.\"\n )\n\n\nif __name__ == \"__main__\":\n typer.run(main)\n", "path": "webknossos/webknossos/cli/download.py"}]} | 1,111 | 379 |
gh_patches_debug_5052 | rasdani/github-patches | git_diff | TheAlgorithms__Python-8766 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
I found a typo in graphs/greedy_best_first
### What would you like to share?
The dx and dy are reversed!
https://github.com/TheAlgorithms/Python/blob/ce43a8ac4ad14e1639014d374b1137906218cfe3/graphs/greedy_best_first.py#L61-L63
Expected correction
```python
dx = abs(self.pos_x - self.goal_x)
dy = abs(self.pos_y - self.goal_y)
return dx + dy
```
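
Note that because the heuristic returns `dx + dy`, swapping the two variable names does not change the returned value; the fix is about readability rather than behavior. A quick self-contained check (not part of the original report):

```python
def heuristic_swapped(pos_x, pos_y, goal_x, goal_y):
    dy = abs(pos_x - goal_x)  # names reversed, as in the current code
    dx = abs(pos_y - goal_y)
    return dx + dy

def heuristic_fixed(pos_x, pos_y, goal_x, goal_y):
    dx = abs(pos_x - goal_x)
    dy = abs(pos_y - goal_y)
    return dx + dy

# Same inputs as the Node doctest in the file: Node(1, 4, 3, 4, ...) -> 2
assert heuristic_swapped(1, 4, 3, 4) == heuristic_fixed(1, 4, 3, 4) == 2
```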
### Additional information
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `graphs/greedy_best_first.py`
Content:
```
1 """
2 https://en.wikipedia.org/wiki/Best-first_search#Greedy_BFS
3 """
4
5 from __future__ import annotations
6
7 Path = list[tuple[int, int]]
8
9 grid = [
10 [0, 0, 0, 0, 0, 0, 0],
11 [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
12 [0, 0, 0, 0, 0, 0, 0],
13 [0, 0, 1, 0, 0, 0, 0],
14 [1, 0, 1, 0, 0, 0, 0],
15 [0, 0, 0, 0, 0, 0, 0],
16 [0, 0, 0, 0, 1, 0, 0],
17 ]
18
19 delta = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
20
21
22 class Node:
23 """
24 >>> k = Node(0, 0, 4, 5, 0, None)
25 >>> k.calculate_heuristic()
26 9
27 >>> n = Node(1, 4, 3, 4, 2, None)
28 >>> n.calculate_heuristic()
29 2
30 >>> l = [k, n]
31 >>> n == l[0]
32 False
33 >>> l.sort()
34 >>> n == l[0]
35 True
36 """
37
38 def __init__(
39 self,
40 pos_x: int,
41 pos_y: int,
42 goal_x: int,
43 goal_y: int,
44 g_cost: float,
45 parent: Node | None,
46 ):
47 self.pos_x = pos_x
48 self.pos_y = pos_y
49 self.pos = (pos_y, pos_x)
50 self.goal_x = goal_x
51 self.goal_y = goal_y
52 self.g_cost = g_cost
53 self.parent = parent
54 self.f_cost = self.calculate_heuristic()
55
56 def calculate_heuristic(self) -> float:
57 """
58 The heuristic here is the Manhattan Distance
59 Could elaborate to offer more than one choice
60 """
61 dy = abs(self.pos_x - self.goal_x)
62 dx = abs(self.pos_y - self.goal_y)
63 return dx + dy
64
65 def __lt__(self, other) -> bool:
66 return self.f_cost < other.f_cost
67
68
69 class GreedyBestFirst:
70 """
71 >>> gbf = GreedyBestFirst((0, 0), (len(grid) - 1, len(grid[0]) - 1))
72 >>> [x.pos for x in gbf.get_successors(gbf.start)]
73 [(1, 0), (0, 1)]
74 >>> (gbf.start.pos_y + delta[3][0], gbf.start.pos_x + delta[3][1])
75 (0, 1)
76 >>> (gbf.start.pos_y + delta[2][0], gbf.start.pos_x + delta[2][1])
77 (1, 0)
78 >>> gbf.retrace_path(gbf.start)
79 [(0, 0)]
80 >>> gbf.search() # doctest: +NORMALIZE_WHITESPACE
81 [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 1), (5, 1), (6, 1),
82 (6, 2), (6, 3), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)]
83 """
84
85 def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
86 self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
87 self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
88
89 self.open_nodes = [self.start]
90 self.closed_nodes: list[Node] = []
91
92 self.reached = False
93
94 def search(self) -> Path | None:
95 """
96 Search for the path,
97 if a path is not found, only the starting position is returned
98 """
99 while self.open_nodes:
100 # Open Nodes are sorted using __lt__
101 self.open_nodes.sort()
102 current_node = self.open_nodes.pop(0)
103
104 if current_node.pos == self.target.pos:
105 self.reached = True
106 return self.retrace_path(current_node)
107
108 self.closed_nodes.append(current_node)
109 successors = self.get_successors(current_node)
110
111 for child_node in successors:
112 if child_node in self.closed_nodes:
113 continue
114
115 if child_node not in self.open_nodes:
116 self.open_nodes.append(child_node)
117 else:
118 # retrieve the best current path
119 better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
120
121 if child_node.g_cost < better_node.g_cost:
122 self.open_nodes.append(child_node)
123 else:
124 self.open_nodes.append(better_node)
125
126 if not self.reached:
127 return [self.start.pos]
128 return None
129
130 def get_successors(self, parent: Node) -> list[Node]:
131 """
132 Returns a list of successors (both in the grid and free spaces)
133 """
134 successors = []
135 for action in delta:
136 pos_x = parent.pos_x + action[1]
137 pos_y = parent.pos_y + action[0]
138
139 if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
140 continue
141
142 if grid[pos_y][pos_x] != 0:
143 continue
144
145 successors.append(
146 Node(
147 pos_x,
148 pos_y,
149 self.target.pos_y,
150 self.target.pos_x,
151 parent.g_cost + 1,
152 parent,
153 )
154 )
155 return successors
156
157 def retrace_path(self, node: Node | None) -> Path:
158 """
159 Retrace the path from parents to parents until start node
160 """
161 current_node = node
162 path = []
163 while current_node is not None:
164 path.append((current_node.pos_y, current_node.pos_x))
165 current_node = current_node.parent
166 path.reverse()
167 return path
168
169
170 if __name__ == "__main__":
171 init = (0, 0)
172 goal = (len(grid) - 1, len(grid[0]) - 1)
173 for elem in grid:
174 print(elem)
175
176 print("------")
177
178 greedy_bf = GreedyBestFirst(init, goal)
179 path = greedy_bf.search()
180 if path:
181 for pos_x, pos_y in path:
182 grid[pos_x][pos_y] = 2
183
184 for elem in grid:
185 print(elem)
186
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/graphs/greedy_best_first.py b/graphs/greedy_best_first.py
--- a/graphs/greedy_best_first.py
+++ b/graphs/greedy_best_first.py
@@ -58,8 +58,8 @@
The heuristic here is the Manhattan Distance
Could elaborate to offer more than one choice
"""
- dy = abs(self.pos_x - self.goal_x)
- dx = abs(self.pos_y - self.goal_y)
+ dx = abs(self.pos_x - self.goal_x)
+ dy = abs(self.pos_y - self.goal_y)
return dx + dy
def __lt__(self, other) -> bool:
| {"golden_diff": "diff --git a/graphs/greedy_best_first.py b/graphs/greedy_best_first.py\n--- a/graphs/greedy_best_first.py\n+++ b/graphs/greedy_best_first.py\n@@ -58,8 +58,8 @@\n The heuristic here is the Manhattan Distance\n Could elaborate to offer more than one choice\n \"\"\"\n- dy = abs(self.pos_x - self.goal_x)\n- dx = abs(self.pos_y - self.goal_y)\n+ dx = abs(self.pos_x - self.goal_x)\n+ dy = abs(self.pos_y - self.goal_y)\n return dx + dy\n \n def __lt__(self, other) -> bool:\n", "issue": "I found typo in graphs/greedy_best_first\n### What would you like to share?\n\nThe dx and dy are reversed!\r\nhttps://github.com/TheAlgorithms/Python/blob/ce43a8ac4ad14e1639014d374b1137906218cfe3/graphs/greedy_best_first.py#L61-L63\r\n\r\nExpected correction\r\n```python\r\ndx = abs(self.pos_x - self.goal_x)\r\ndy = abs(self.pos_y - self.goal_y)\r\nreturn dx + dy\r\n```\n\n### Additional information\n\n_No response_\n", "before_files": [{"content": "\"\"\"\nhttps://en.wikipedia.org/wiki/Best-first_search#Greedy_BFS\n\"\"\"\n\nfrom __future__ import annotations\n\nPath = list[tuple[int, int]]\n\ngrid = [\n [0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0],\n]\n\ndelta = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right\n\n\nclass Node:\n \"\"\"\n >>> k = Node(0, 0, 4, 5, 0, None)\n >>> k.calculate_heuristic()\n 9\n >>> n = Node(1, 4, 3, 4, 2, None)\n >>> n.calculate_heuristic()\n 2\n >>> l = [k, n]\n >>> n == l[0]\n False\n >>> l.sort()\n >>> n == l[0]\n True\n \"\"\"\n\n def __init__(\n self,\n pos_x: int,\n pos_y: int,\n goal_x: int,\n goal_y: int,\n g_cost: float,\n parent: Node | None,\n ):\n self.pos_x = pos_x\n self.pos_y = pos_y\n self.pos = (pos_y, pos_x)\n self.goal_x = goal_x\n self.goal_y = goal_y\n self.g_cost = g_cost\n self.parent = parent\n self.f_cost = self.calculate_heuristic()\n\n def calculate_heuristic(self) -> float:\n \"\"\"\n The heuristic here is the Manhattan Distance\n Could elaborate to offer more than one choice\n \"\"\"\n dy = abs(self.pos_x - self.goal_x)\n dx = abs(self.pos_y - self.goal_y)\n return dx + dy\n\n def __lt__(self, other) -> bool:\n return self.f_cost < other.f_cost\n\n\nclass GreedyBestFirst:\n \"\"\"\n >>> gbf = GreedyBestFirst((0, 0), (len(grid) - 1, len(grid[0]) - 1))\n >>> [x.pos for x in gbf.get_successors(gbf.start)]\n [(1, 0), (0, 1)]\n >>> (gbf.start.pos_y + delta[3][0], gbf.start.pos_x + delta[3][1])\n (0, 1)\n >>> (gbf.start.pos_y + delta[2][0], gbf.start.pos_x + delta[2][1])\n (1, 0)\n >>> gbf.retrace_path(gbf.start)\n [(0, 0)]\n >>> gbf.search() # doctest: +NORMALIZE_WHITESPACE\n [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 1), (5, 1), (6, 1),\n (6, 2), (6, 3), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)]\n \"\"\"\n\n def __init__(self, start: tuple[int, int], goal: tuple[int, int]):\n self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)\n self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)\n\n self.open_nodes = [self.start]\n self.closed_nodes: list[Node] = []\n\n self.reached = False\n\n def search(self) -> Path | None:\n \"\"\"\n Search for the path,\n if a path is not found, only the starting position is returned\n \"\"\"\n while self.open_nodes:\n # Open Nodes are sorted using __lt__\n self.open_nodes.sort()\n current_node = self.open_nodes.pop(0)\n\n if current_node.pos == self.target.pos:\n self.reached = True\n return 
self.retrace_path(current_node)\n\n self.closed_nodes.append(current_node)\n successors = self.get_successors(current_node)\n\n for child_node in successors:\n if child_node in self.closed_nodes:\n continue\n\n if child_node not in self.open_nodes:\n self.open_nodes.append(child_node)\n else:\n # retrieve the best current path\n better_node = self.open_nodes.pop(self.open_nodes.index(child_node))\n\n if child_node.g_cost < better_node.g_cost:\n self.open_nodes.append(child_node)\n else:\n self.open_nodes.append(better_node)\n\n if not self.reached:\n return [self.start.pos]\n return None\n\n def get_successors(self, parent: Node) -> list[Node]:\n \"\"\"\n Returns a list of successors (both in the grid and free spaces)\n \"\"\"\n successors = []\n for action in delta:\n pos_x = parent.pos_x + action[1]\n pos_y = parent.pos_y + action[0]\n\n if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):\n continue\n\n if grid[pos_y][pos_x] != 0:\n continue\n\n successors.append(\n Node(\n pos_x,\n pos_y,\n self.target.pos_y,\n self.target.pos_x,\n parent.g_cost + 1,\n parent,\n )\n )\n return successors\n\n def retrace_path(self, node: Node | None) -> Path:\n \"\"\"\n Retrace the path from parents to parents until start node\n \"\"\"\n current_node = node\n path = []\n while current_node is not None:\n path.append((current_node.pos_y, current_node.pos_x))\n current_node = current_node.parent\n path.reverse()\n return path\n\n\nif __name__ == \"__main__\":\n init = (0, 0)\n goal = (len(grid) - 1, len(grid[0]) - 1)\n for elem in grid:\n print(elem)\n\n print(\"------\")\n\n greedy_bf = GreedyBestFirst(init, goal)\n path = greedy_bf.search()\n if path:\n for pos_x, pos_y in path:\n grid[pos_x][pos_y] = 2\n\n for elem in grid:\n print(elem)\n", "path": "graphs/greedy_best_first.py"}], "after_files": [{"content": "\"\"\"\nhttps://en.wikipedia.org/wiki/Best-first_search#Greedy_BFS\n\"\"\"\n\nfrom __future__ import annotations\n\nPath = list[tuple[int, int]]\n\ngrid = [\n [0, 0, 0, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 1, 0, 0, 0, 0],\n [1, 0, 1, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 1, 0, 0],\n]\n\ndelta = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right\n\n\nclass Node:\n \"\"\"\n >>> k = Node(0, 0, 4, 5, 0, None)\n >>> k.calculate_heuristic()\n 9\n >>> n = Node(1, 4, 3, 4, 2, None)\n >>> n.calculate_heuristic()\n 2\n >>> l = [k, n]\n >>> n == l[0]\n False\n >>> l.sort()\n >>> n == l[0]\n True\n \"\"\"\n\n def __init__(\n self,\n pos_x: int,\n pos_y: int,\n goal_x: int,\n goal_y: int,\n g_cost: float,\n parent: Node | None,\n ):\n self.pos_x = pos_x\n self.pos_y = pos_y\n self.pos = (pos_y, pos_x)\n self.goal_x = goal_x\n self.goal_y = goal_y\n self.g_cost = g_cost\n self.parent = parent\n self.f_cost = self.calculate_heuristic()\n\n def calculate_heuristic(self) -> float:\n \"\"\"\n The heuristic here is the Manhattan Distance\n Could elaborate to offer more than one choice\n \"\"\"\n dx = abs(self.pos_x - self.goal_x)\n dy = abs(self.pos_y - self.goal_y)\n return dx + dy\n\n def __lt__(self, other) -> bool:\n return self.f_cost < other.f_cost\n\n\nclass GreedyBestFirst:\n \"\"\"\n >>> gbf = GreedyBestFirst((0, 0), (len(grid) - 1, len(grid[0]) - 1))\n >>> [x.pos for x in gbf.get_successors(gbf.start)]\n [(1, 0), (0, 1)]\n >>> (gbf.start.pos_y + delta[3][0], gbf.start.pos_x + delta[3][1])\n (0, 1)\n >>> (gbf.start.pos_y + delta[2][0], gbf.start.pos_x + delta[2][1])\n (1, 0)\n >>> 
gbf.retrace_path(gbf.start)\n [(0, 0)]\n >>> gbf.search() # doctest: +NORMALIZE_WHITESPACE\n [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 1), (5, 1), (6, 1),\n (6, 2), (6, 3), (5, 3), (5, 4), (5, 5), (6, 5), (6, 6)]\n \"\"\"\n\n def __init__(self, start: tuple[int, int], goal: tuple[int, int]):\n self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)\n self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)\n\n self.open_nodes = [self.start]\n self.closed_nodes: list[Node] = []\n\n self.reached = False\n\n def search(self) -> Path | None:\n \"\"\"\n Search for the path,\n if a path is not found, only the starting position is returned\n \"\"\"\n while self.open_nodes:\n # Open Nodes are sorted using __lt__\n self.open_nodes.sort()\n current_node = self.open_nodes.pop(0)\n\n if current_node.pos == self.target.pos:\n self.reached = True\n return self.retrace_path(current_node)\n\n self.closed_nodes.append(current_node)\n successors = self.get_successors(current_node)\n\n for child_node in successors:\n if child_node in self.closed_nodes:\n continue\n\n if child_node not in self.open_nodes:\n self.open_nodes.append(child_node)\n else:\n # retrieve the best current path\n better_node = self.open_nodes.pop(self.open_nodes.index(child_node))\n\n if child_node.g_cost < better_node.g_cost:\n self.open_nodes.append(child_node)\n else:\n self.open_nodes.append(better_node)\n\n if not self.reached:\n return [self.start.pos]\n return None\n\n def get_successors(self, parent: Node) -> list[Node]:\n \"\"\"\n Returns a list of successors (both in the grid and free spaces)\n \"\"\"\n successors = []\n for action in delta:\n pos_x = parent.pos_x + action[1]\n pos_y = parent.pos_y + action[0]\n\n if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):\n continue\n\n if grid[pos_y][pos_x] != 0:\n continue\n\n successors.append(\n Node(\n pos_x,\n pos_y,\n self.target.pos_y,\n self.target.pos_x,\n parent.g_cost + 1,\n parent,\n )\n )\n return successors\n\n def retrace_path(self, node: Node | None) -> Path:\n \"\"\"\n Retrace the path from parents to parents until start node\n \"\"\"\n current_node = node\n path = []\n while current_node is not None:\n path.append((current_node.pos_y, current_node.pos_x))\n current_node = current_node.parent\n path.reverse()\n return path\n\n\nif __name__ == \"__main__\":\n init = (0, 0)\n goal = (len(grid) - 1, len(grid[0]) - 1)\n for elem in grid:\n print(elem)\n\n print(\"------\")\n\n greedy_bf = GreedyBestFirst(init, goal)\n path = greedy_bf.search()\n if path:\n for pos_x, pos_y in path:\n grid[pos_x][pos_y] = 2\n\n for elem in grid:\n print(elem)\n", "path": "graphs/greedy_best_first.py"}]} | 2,376 | 147 |
gh_patches_debug_37238 | rasdani/github-patches | git_diff | conan-io__conan-16103 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[feature] show uploaded files size in `conan upload`
It would be very convenient to show the size of the files that will be uploaded to a specific remote via `conan upload ...` for diagnostic purposes; e.g. artifacts can sometimes be very large and CI could refuse to upload them, see e.g. [this StackOverflow question](https://stackoverflow.com/questions/64329087/gitlab-self-hosted-error-uploading-artifacts-as-archive-to-coordinator-to) as an example of a related CI error. With this change CI administrators could adjust the file-size limit on CI without trial-and-error changes to the CI configs.
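
A rough sketch of the kind of summary meant here (hypothetical helper names, plain Python, not Conan's actual API):

```python
import os

def total_upload_size(paths):
    """Combined size in bytes of the files that would be uploaded."""
    return sum(os.stat(p).st_size for p in paths)

def human_readable(num_bytes):
    """Format a byte count as e.g. '12.3MB'."""
    for unit in ("B", "KB", "MB", "GB"):
        if num_bytes < 1024.0:
            return f"{num_bytes:.1f}{unit}"
        num_bytes /= 1024.0
    return f"{num_bytes:.1f}TB"

# The upload command could then print e.g. "Uploading recipe 'pkg/1.0' (5.2MB)"
```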
- [x] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/cmd/uploader.py`
Content:
```
1 import os
2 import shutil
3 import time
4
5 from conan.internal.conan_app import ConanApp
6 from conan.api.output import ConanOutput
7 from conans.client.source import retrieve_exports_sources
8 from conans.errors import ConanException, NotFoundException
9 from conans.paths import (CONAN_MANIFEST, CONANFILE, EXPORT_SOURCES_TGZ_NAME,
10 EXPORT_TGZ_NAME, PACKAGE_TGZ_NAME, CONANINFO)
11 from conans.util.files import (clean_dirty, is_dirty, gather_files,
12 gzopen_without_timestamps, set_dirty_context_manager, mkdir)
13
14 UPLOAD_POLICY_FORCE = "force-upload"
15 UPLOAD_POLICY_SKIP = "skip-upload"
16
17
18 class UploadUpstreamChecker:
19 """ decides if something needs to be uploaded or force-uploaded checking if that exact
20 revision already exists in the remote server, or if the --force parameter is forcing the upload
21 This is completely irrespective of the actual package contents, it only uses the local
22 computed revision and the remote one
23 """
24 def __init__(self, app: ConanApp):
25 self._app = app
26
27 def check(self, upload_bundle, remote, force):
28 for ref, recipe_bundle in upload_bundle.refs().items():
29 self._check_upstream_recipe(ref, recipe_bundle, remote, force)
30 for pref, prev_bundle in upload_bundle.prefs(ref, recipe_bundle).items():
31 self._check_upstream_package(pref, prev_bundle, remote, force)
32
33 def _check_upstream_recipe(self, ref, ref_bundle, remote, force):
34 output = ConanOutput(scope=str(ref))
35 output.info("Checking which revisions exist in the remote server")
36 try:
37 assert ref.revision
38 # TODO: It is a bit ugly, interface-wise to ask for revisions to check existence
39 server_ref = self._app.remote_manager.get_recipe_revision_reference(ref, remote)
40 assert server_ref # If successful (not raising NotFoundException), this will exist
41 except NotFoundException:
42 ref_bundle["force_upload"] = False
43 ref_bundle["upload"] = True
44 else:
45 if force:
46 output.info(f"Recipe '{ref.repr_notime()}' already in server, forcing upload")
47 ref_bundle["force_upload"] = True
48 ref_bundle["upload"] = True
49 else:
50 output.info(f"Recipe '{ref.repr_notime()}' already in server, skipping upload")
51 ref_bundle["upload"] = False
52 ref_bundle["force_upload"] = False
53
54 def _check_upstream_package(self, pref, prev_bundle, remote, force):
55 assert (pref.revision is not None), "Cannot upload a package without PREV"
56 assert (pref.ref.revision is not None), "Cannot upload a package without RREV"
57
58 try:
59 # TODO: It is a bit ugly, interface-wise to ask for revisions to check existence
60 server_revisions = self._app.remote_manager.get_package_revision_reference(pref, remote)
61 assert server_revisions
62 except NotFoundException:
63 prev_bundle["force_upload"] = False
64 prev_bundle["upload"] = True
65 else:
66 output = ConanOutput(scope=str(pref.ref))
67 if force:
68 output.info(f"Package '{pref.repr_notime()}' already in server, forcing upload")
69 prev_bundle["force_upload"] = True
70 prev_bundle["upload"] = True
71 else:
72 output.info(f"Package '{pref.repr_notime()}' already in server, skipping upload")
73 prev_bundle["force_upload"] = False
74 prev_bundle["upload"] = False
75
76
77 class PackagePreparator:
78 def __init__(self, app: ConanApp, global_conf):
79 self._app = app
80 self._global_conf = global_conf
81
82 def prepare(self, upload_bundle, enabled_remotes):
83 for ref, bundle in upload_bundle.refs().items():
84 layout = self._app.cache.recipe_layout(ref)
85 conanfile_path = layout.conanfile()
86 conanfile = self._app.loader.load_basic(conanfile_path)
87
88 if bundle.get("upload"):
89 self._prepare_recipe(ref, bundle, conanfile, enabled_remotes)
90 for pref, prev_bundle in upload_bundle.prefs(ref, bundle).items():
91 if prev_bundle.get("upload"):
92 self._prepare_package(pref, prev_bundle)
93
94 def _prepare_recipe(self, ref, ref_bundle, conanfile, remotes):
95 """ do a bunch of things that are necessary before actually executing the upload:
96 - retrieve exports_sources to complete the recipe if necessary
97 - compress the artifacts in conan_export.tgz and conan_export_sources.tgz
98 """
99 try:
100 recipe_layout = self._app.cache.recipe_layout(ref)
101 retrieve_exports_sources(self._app.remote_manager, recipe_layout, conanfile, ref,
102 remotes)
103 cache_files = self._compress_recipe_files(recipe_layout, ref)
104 ref_bundle["files"] = cache_files
105 except Exception as e:
106 raise ConanException(f"{ref} Error while compressing: {e}")
107
108 def _compress_recipe_files(self, layout, ref):
109 download_export_folder = layout.download_export()
110
111 output = ConanOutput(scope=str(ref))
112 for f in (EXPORT_TGZ_NAME, EXPORT_SOURCES_TGZ_NAME):
113 tgz_path = os.path.join(download_export_folder, f)
114 if is_dirty(tgz_path):
115 output.warning("Removing %s, marked as dirty" % f)
116 os.remove(tgz_path)
117 clean_dirty(tgz_path)
118
119 export_folder = layout.export()
120 files, symlinked_folders = gather_files(export_folder)
121 files.update(symlinked_folders)
122 if CONANFILE not in files or CONAN_MANIFEST not in files:
123 raise ConanException("Cannot upload corrupted recipe '%s'" % str(ref))
124 export_src_folder = layout.export_sources()
125 src_files, src_symlinked_folders = gather_files(export_src_folder)
126 src_files.update(src_symlinked_folders)
127
128 # We do a copy of conanfile and conanmanifest to the download_export_folder
129 # so it is identical as when it is downloaded, and all files are from the same location
130 # to be uploaded
131 mkdir(download_export_folder)
132 shutil.copy2(os.path.join(export_folder, CONANFILE),
133 os.path.join(download_export_folder, CONANFILE))
134 shutil.copy2(os.path.join(export_folder, CONAN_MANIFEST),
135 os.path.join(download_export_folder, CONAN_MANIFEST))
136 result = {CONANFILE: os.path.join(download_export_folder, CONANFILE),
137 CONAN_MANIFEST: os.path.join(download_export_folder, CONAN_MANIFEST)}
138 # Files NOT included in the tgz
139 files.pop(CONANFILE)
140 files.pop(CONAN_MANIFEST)
141
142 def add_tgz(tgz_name, tgz_files):
143 tgz = os.path.join(download_export_folder, tgz_name)
144 if os.path.isfile(tgz):
145 result[tgz_name] = tgz
146 elif tgz_files:
147 compresslevel = self._global_conf.get("core.gzip:compresslevel", check_type=int)
148 tgz = compress_files(tgz_files, tgz_name, download_export_folder,
149 compresslevel=compresslevel, ref=ref)
150 result[tgz_name] = tgz
151
152 add_tgz(EXPORT_TGZ_NAME, files)
153 add_tgz(EXPORT_SOURCES_TGZ_NAME, src_files)
154 return result
155
156 def _prepare_package(self, pref, prev_bundle):
157 pkg_layout = self._app.cache.pkg_layout(pref)
158 if pkg_layout.package_is_dirty():
159 raise ConanException(f"Package {pref} is corrupted, aborting upload.\n"
160 f"Remove it with 'conan remove {pref}'")
161 cache_files = self._compress_package_files(pkg_layout, pref)
162 prev_bundle["files"] = cache_files
163
164 def _compress_package_files(self, layout, pref):
165 output = ConanOutput(scope=str(pref))
166 download_pkg_folder = layout.download_package()
167 package_tgz = os.path.join(download_pkg_folder, PACKAGE_TGZ_NAME)
168 if is_dirty(package_tgz):
169 output.warning("Removing %s, marked as dirty" % PACKAGE_TGZ_NAME)
170 os.remove(package_tgz)
171 clean_dirty(package_tgz)
172
173 # Get all the files in that directory
174 # existing package, will use short paths if defined
175 package_folder = layout.package()
176 files, symlinked_folders = gather_files(package_folder)
177 files.update(symlinked_folders)
178
179 if CONANINFO not in files or CONAN_MANIFEST not in files:
180 raise ConanException("Cannot upload corrupted package '%s'" % str(pref))
181
182 # Do a copy so the location of CONANINFO and MANIFEST is the "download" folder one
183 mkdir(download_pkg_folder)
184 shutil.copy2(os.path.join(package_folder, CONANINFO),
185 os.path.join(download_pkg_folder, CONANINFO))
186 shutil.copy2(os.path.join(package_folder, CONAN_MANIFEST),
187 os.path.join(download_pkg_folder, CONAN_MANIFEST))
188 # Files NOT included in the tgz
189 files.pop(CONANINFO)
190 files.pop(CONAN_MANIFEST)
191
192 if not os.path.isfile(package_tgz):
193 tgz_files = {f: path for f, path in files.items()}
194 compresslevel = self._global_conf.get("core.gzip:compresslevel", check_type=int)
195 tgz_path = compress_files(tgz_files, PACKAGE_TGZ_NAME, download_pkg_folder,
196 compresslevel=compresslevel, ref=pref)
197 assert tgz_path == package_tgz
198 assert os.path.exists(package_tgz)
199
200 return {PACKAGE_TGZ_NAME: package_tgz,
201 CONANINFO: os.path.join(download_pkg_folder, CONANINFO),
202 CONAN_MANIFEST: os.path.join(download_pkg_folder, CONAN_MANIFEST)}
203
204
205 class UploadExecutor:
206 """ does the actual file transfer to the remote. The files to be uploaded have already
207 been computed and are passed in the ``upload_data`` parameter, so this executor is also
208 agnostic about which files are transferred
209 """
210 def __init__(self, app: ConanApp):
211 self._app = app
212
213 def upload(self, upload_data, remote):
214 for ref, bundle in upload_data.refs().items():
215 if bundle.get("upload"):
216 self.upload_recipe(ref, bundle, remote)
217 for pref, prev_bundle in upload_data.prefs(ref, bundle).items():
218 if prev_bundle.get("upload"):
219 self.upload_package(pref, prev_bundle, remote)
220
221 def upload_recipe(self, ref, bundle, remote):
222 output = ConanOutput(scope=str(ref))
223 output.info(f"Uploading recipe '{ref.repr_notime()}'")
224 t1 = time.time()
225 cache_files = bundle["files"]
226
227 self._app.remote_manager.upload_recipe(ref, cache_files, remote)
228
229 duration = time.time() - t1
230 output.debug(f"Upload {ref} in {duration} time")
231 return ref
232
233 def upload_package(self, pref, prev_bundle, remote):
234 output = ConanOutput(scope=str(pref.ref))
235 output.info(f"Uploading package '{pref.repr_notime()}'")
236 cache_files = prev_bundle["files"]
237 assert (pref.revision is not None), "Cannot upload a package without PREV"
238 assert (pref.ref.revision is not None), "Cannot upload a package without RREV"
239
240 t1 = time.time()
241 self._app.remote_manager.upload_package(pref, cache_files, remote)
242 duration = time.time() - t1
243 output.debug(f"Upload {pref} in {duration} time")
244
245
246 def compress_files(files, name, dest_dir, compresslevel=None, ref=None):
247 t1 = time.time()
248 # FIXME, better write to disk sequentially and not keep tgz contents in memory
249 tgz_path = os.path.join(dest_dir, name)
250 ConanOutput(scope=str(ref)).info(f"Compressing {name}")
251 with set_dirty_context_manager(tgz_path), open(tgz_path, "wb") as tgz_handle:
252 tgz = gzopen_without_timestamps(name, mode="w", fileobj=tgz_handle,
253 compresslevel=compresslevel)
254 for filename, abs_path in sorted(files.items()):
255 # recursive is False in case it is a symlink to a folder
256 tgz.add(abs_path, filename, recursive=False)
257 tgz.close()
258
259 duration = time.time() - t1
260 ConanOutput().debug(f"{name} compressed in {duration} time")
261 return tgz_path
262
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/client/cmd/uploader.py b/conans/client/cmd/uploader.py
--- a/conans/client/cmd/uploader.py
+++ b/conans/client/cmd/uploader.py
@@ -9,7 +9,8 @@
from conans.paths import (CONAN_MANIFEST, CONANFILE, EXPORT_SOURCES_TGZ_NAME,
EXPORT_TGZ_NAME, PACKAGE_TGZ_NAME, CONANINFO)
from conans.util.files import (clean_dirty, is_dirty, gather_files,
- gzopen_without_timestamps, set_dirty_context_manager, mkdir)
+ gzopen_without_timestamps, set_dirty_context_manager, mkdir,
+ human_size)
UPLOAD_POLICY_FORCE = "force-upload"
UPLOAD_POLICY_SKIP = "skip-upload"
@@ -220,10 +221,11 @@
def upload_recipe(self, ref, bundle, remote):
output = ConanOutput(scope=str(ref))
- output.info(f"Uploading recipe '{ref.repr_notime()}'")
- t1 = time.time()
cache_files = bundle["files"]
+ output.info(f"Uploading recipe '{ref.repr_notime()}' ({_total_size(cache_files)})")
+
+ t1 = time.time()
self._app.remote_manager.upload_recipe(ref, cache_files, remote)
duration = time.time() - t1
@@ -232,11 +234,12 @@
def upload_package(self, pref, prev_bundle, remote):
output = ConanOutput(scope=str(pref.ref))
- output.info(f"Uploading package '{pref.repr_notime()}'")
cache_files = prev_bundle["files"]
assert (pref.revision is not None), "Cannot upload a package without PREV"
assert (pref.ref.revision is not None), "Cannot upload a package without RREV"
+ output.info(f"Uploading package '{pref.repr_notime()}' ({_total_size(cache_files)})")
+
t1 = time.time()
self._app.remote_manager.upload_package(pref, cache_files, remote)
duration = time.time() - t1
@@ -259,3 +262,11 @@
duration = time.time() - t1
ConanOutput().debug(f"{name} compressed in {duration} time")
return tgz_path
+
+
+def _total_size(cache_files):
+ total_size = 0
+ for file in cache_files.values():
+ stat = os.stat(file)
+ total_size += stat.st_size
+ return human_size(total_size)
| {"golden_diff": "diff --git a/conans/client/cmd/uploader.py b/conans/client/cmd/uploader.py\n--- a/conans/client/cmd/uploader.py\n+++ b/conans/client/cmd/uploader.py\n@@ -9,7 +9,8 @@\n from conans.paths import (CONAN_MANIFEST, CONANFILE, EXPORT_SOURCES_TGZ_NAME,\n EXPORT_TGZ_NAME, PACKAGE_TGZ_NAME, CONANINFO)\n from conans.util.files import (clean_dirty, is_dirty, gather_files,\n- gzopen_without_timestamps, set_dirty_context_manager, mkdir)\n+ gzopen_without_timestamps, set_dirty_context_manager, mkdir,\n+ human_size)\n \n UPLOAD_POLICY_FORCE = \"force-upload\"\n UPLOAD_POLICY_SKIP = \"skip-upload\"\n@@ -220,10 +221,11 @@\n \n def upload_recipe(self, ref, bundle, remote):\n output = ConanOutput(scope=str(ref))\n- output.info(f\"Uploading recipe '{ref.repr_notime()}'\")\n- t1 = time.time()\n cache_files = bundle[\"files\"]\n \n+ output.info(f\"Uploading recipe '{ref.repr_notime()}' ({_total_size(cache_files)})\")\n+\n+ t1 = time.time()\n self._app.remote_manager.upload_recipe(ref, cache_files, remote)\n \n duration = time.time() - t1\n@@ -232,11 +234,12 @@\n \n def upload_package(self, pref, prev_bundle, remote):\n output = ConanOutput(scope=str(pref.ref))\n- output.info(f\"Uploading package '{pref.repr_notime()}'\")\n cache_files = prev_bundle[\"files\"]\n assert (pref.revision is not None), \"Cannot upload a package without PREV\"\n assert (pref.ref.revision is not None), \"Cannot upload a package without RREV\"\n \n+ output.info(f\"Uploading package '{pref.repr_notime()}' ({_total_size(cache_files)})\")\n+\n t1 = time.time()\n self._app.remote_manager.upload_package(pref, cache_files, remote)\n duration = time.time() - t1\n@@ -259,3 +262,11 @@\n duration = time.time() - t1\n ConanOutput().debug(f\"{name} compressed in {duration} time\")\n return tgz_path\n+\n+\n+def _total_size(cache_files):\n+ total_size = 0\n+ for file in cache_files.values():\n+ stat = os.stat(file)\n+ total_size += stat.st_size\n+ return human_size(total_size)\n", "issue": "[feature] show uploaded files size in `conan upload`\n<!-- What is your suggestion? Please be as specific as possible! -->\r\nIt would be very convenient to provide the size of files which will be upload to specific remote via `conan upload ...` in diagnostic purposes, e.g. sometimes artifacts could be very large and CI could refuse its uploading, see e.g. [this StackOverflow question](https://stackoverflow.com/questions/64329087/gitlab-self-hosted-error-uploading-artifacts-as-archive-to-coordinator-to) as example of related CI error. 
With this change CI administrators could adjust the file limit at CI without trial and error changing of CI configs.\r\n\r\n- [x] I've read the [CONTRIBUTING guide](https://github.com/conan-io/conan/blob/develop/.github/CONTRIBUTING.md).\r\n\n", "before_files": [{"content": "import os\nimport shutil\nimport time\n\nfrom conan.internal.conan_app import ConanApp\nfrom conan.api.output import ConanOutput\nfrom conans.client.source import retrieve_exports_sources\nfrom conans.errors import ConanException, NotFoundException\nfrom conans.paths import (CONAN_MANIFEST, CONANFILE, EXPORT_SOURCES_TGZ_NAME,\n EXPORT_TGZ_NAME, PACKAGE_TGZ_NAME, CONANINFO)\nfrom conans.util.files import (clean_dirty, is_dirty, gather_files,\n gzopen_without_timestamps, set_dirty_context_manager, mkdir)\n\nUPLOAD_POLICY_FORCE = \"force-upload\"\nUPLOAD_POLICY_SKIP = \"skip-upload\"\n\n\nclass UploadUpstreamChecker:\n \"\"\" decides if something needs to be uploaded or force-uploaded checking if that exact\n revision already exists in the remote server, or if the --force parameter is forcing the upload\n This is completely irrespective of the actual package contents, it only uses the local\n computed revision and the remote one\n \"\"\"\n def __init__(self, app: ConanApp):\n self._app = app\n\n def check(self, upload_bundle, remote, force):\n for ref, recipe_bundle in upload_bundle.refs().items():\n self._check_upstream_recipe(ref, recipe_bundle, remote, force)\n for pref, prev_bundle in upload_bundle.prefs(ref, recipe_bundle).items():\n self._check_upstream_package(pref, prev_bundle, remote, force)\n\n def _check_upstream_recipe(self, ref, ref_bundle, remote, force):\n output = ConanOutput(scope=str(ref))\n output.info(\"Checking which revisions exist in the remote server\")\n try:\n assert ref.revision\n # TODO: It is a bit ugly, interface-wise to ask for revisions to check existence\n server_ref = self._app.remote_manager.get_recipe_revision_reference(ref, remote)\n assert server_ref # If successful (not raising NotFoundException), this will exist\n except NotFoundException:\n ref_bundle[\"force_upload\"] = False\n ref_bundle[\"upload\"] = True\n else:\n if force:\n output.info(f\"Recipe '{ref.repr_notime()}' already in server, forcing upload\")\n ref_bundle[\"force_upload\"] = True\n ref_bundle[\"upload\"] = True\n else:\n output.info(f\"Recipe '{ref.repr_notime()}' already in server, skipping upload\")\n ref_bundle[\"upload\"] = False\n ref_bundle[\"force_upload\"] = False\n\n def _check_upstream_package(self, pref, prev_bundle, remote, force):\n assert (pref.revision is not None), \"Cannot upload a package without PREV\"\n assert (pref.ref.revision is not None), \"Cannot upload a package without RREV\"\n\n try:\n # TODO: It is a bit ugly, interface-wise to ask for revisions to check existence\n server_revisions = self._app.remote_manager.get_package_revision_reference(pref, remote)\n assert server_revisions\n except NotFoundException:\n prev_bundle[\"force_upload\"] = False\n prev_bundle[\"upload\"] = True\n else:\n output = ConanOutput(scope=str(pref.ref))\n if force:\n output.info(f\"Package '{pref.repr_notime()}' already in server, forcing upload\")\n prev_bundle[\"force_upload\"] = True\n prev_bundle[\"upload\"] = True\n else:\n output.info(f\"Package '{pref.repr_notime()}' already in server, skipping upload\")\n prev_bundle[\"force_upload\"] = False\n prev_bundle[\"upload\"] = False\n\n\nclass PackagePreparator:\n def __init__(self, app: ConanApp, global_conf):\n self._app = app\n self._global_conf = 
global_conf\n\n def prepare(self, upload_bundle, enabled_remotes):\n for ref, bundle in upload_bundle.refs().items():\n layout = self._app.cache.recipe_layout(ref)\n conanfile_path = layout.conanfile()\n conanfile = self._app.loader.load_basic(conanfile_path)\n\n if bundle.get(\"upload\"):\n self._prepare_recipe(ref, bundle, conanfile, enabled_remotes)\n for pref, prev_bundle in upload_bundle.prefs(ref, bundle).items():\n if prev_bundle.get(\"upload\"):\n self._prepare_package(pref, prev_bundle)\n\n def _prepare_recipe(self, ref, ref_bundle, conanfile, remotes):\n \"\"\" do a bunch of things that are necessary before actually executing the upload:\n - retrieve exports_sources to complete the recipe if necessary\n - compress the artifacts in conan_export.tgz and conan_export_sources.tgz\n \"\"\"\n try:\n recipe_layout = self._app.cache.recipe_layout(ref)\n retrieve_exports_sources(self._app.remote_manager, recipe_layout, conanfile, ref,\n remotes)\n cache_files = self._compress_recipe_files(recipe_layout, ref)\n ref_bundle[\"files\"] = cache_files\n except Exception as e:\n raise ConanException(f\"{ref} Error while compressing: {e}\")\n\n def _compress_recipe_files(self, layout, ref):\n download_export_folder = layout.download_export()\n\n output = ConanOutput(scope=str(ref))\n for f in (EXPORT_TGZ_NAME, EXPORT_SOURCES_TGZ_NAME):\n tgz_path = os.path.join(download_export_folder, f)\n if is_dirty(tgz_path):\n output.warning(\"Removing %s, marked as dirty\" % f)\n os.remove(tgz_path)\n clean_dirty(tgz_path)\n\n export_folder = layout.export()\n files, symlinked_folders = gather_files(export_folder)\n files.update(symlinked_folders)\n if CONANFILE not in files or CONAN_MANIFEST not in files:\n raise ConanException(\"Cannot upload corrupted recipe '%s'\" % str(ref))\n export_src_folder = layout.export_sources()\n src_files, src_symlinked_folders = gather_files(export_src_folder)\n src_files.update(src_symlinked_folders)\n\n # We do a copy of conanfile and conanmanifest to the download_export_folder\n # so it is identical as when it is downloaded, and all files are from the same location\n # to be uploaded\n mkdir(download_export_folder)\n shutil.copy2(os.path.join(export_folder, CONANFILE),\n os.path.join(download_export_folder, CONANFILE))\n shutil.copy2(os.path.join(export_folder, CONAN_MANIFEST),\n os.path.join(download_export_folder, CONAN_MANIFEST))\n result = {CONANFILE: os.path.join(download_export_folder, CONANFILE),\n CONAN_MANIFEST: os.path.join(download_export_folder, CONAN_MANIFEST)}\n # Files NOT included in the tgz\n files.pop(CONANFILE)\n files.pop(CONAN_MANIFEST)\n\n def add_tgz(tgz_name, tgz_files):\n tgz = os.path.join(download_export_folder, tgz_name)\n if os.path.isfile(tgz):\n result[tgz_name] = tgz\n elif tgz_files:\n compresslevel = self._global_conf.get(\"core.gzip:compresslevel\", check_type=int)\n tgz = compress_files(tgz_files, tgz_name, download_export_folder,\n compresslevel=compresslevel, ref=ref)\n result[tgz_name] = tgz\n\n add_tgz(EXPORT_TGZ_NAME, files)\n add_tgz(EXPORT_SOURCES_TGZ_NAME, src_files)\n return result\n\n def _prepare_package(self, pref, prev_bundle):\n pkg_layout = self._app.cache.pkg_layout(pref)\n if pkg_layout.package_is_dirty():\n raise ConanException(f\"Package {pref} is corrupted, aborting upload.\\n\"\n f\"Remove it with 'conan remove {pref}'\")\n cache_files = self._compress_package_files(pkg_layout, pref)\n prev_bundle[\"files\"] = cache_files\n\n def _compress_package_files(self, layout, pref):\n output = ConanOutput(scope=str(pref))\n 
download_pkg_folder = layout.download_package()\n package_tgz = os.path.join(download_pkg_folder, PACKAGE_TGZ_NAME)\n if is_dirty(package_tgz):\n output.warning(\"Removing %s, marked as dirty\" % PACKAGE_TGZ_NAME)\n os.remove(package_tgz)\n clean_dirty(package_tgz)\n\n # Get all the files in that directory\n # existing package, will use short paths if defined\n package_folder = layout.package()\n files, symlinked_folders = gather_files(package_folder)\n files.update(symlinked_folders)\n\n if CONANINFO not in files or CONAN_MANIFEST not in files:\n raise ConanException(\"Cannot upload corrupted package '%s'\" % str(pref))\n\n # Do a copy so the location of CONANINFO and MANIFEST is the \"download\" folder one\n mkdir(download_pkg_folder)\n shutil.copy2(os.path.join(package_folder, CONANINFO),\n os.path.join(download_pkg_folder, CONANINFO))\n shutil.copy2(os.path.join(package_folder, CONAN_MANIFEST),\n os.path.join(download_pkg_folder, CONAN_MANIFEST))\n # Files NOT included in the tgz\n files.pop(CONANINFO)\n files.pop(CONAN_MANIFEST)\n\n if not os.path.isfile(package_tgz):\n tgz_files = {f: path for f, path in files.items()}\n compresslevel = self._global_conf.get(\"core.gzip:compresslevel\", check_type=int)\n tgz_path = compress_files(tgz_files, PACKAGE_TGZ_NAME, download_pkg_folder,\n compresslevel=compresslevel, ref=pref)\n assert tgz_path == package_tgz\n assert os.path.exists(package_tgz)\n\n return {PACKAGE_TGZ_NAME: package_tgz,\n CONANINFO: os.path.join(download_pkg_folder, CONANINFO),\n CONAN_MANIFEST: os.path.join(download_pkg_folder, CONAN_MANIFEST)}\n\n\nclass UploadExecutor:\n \"\"\" does the actual file transfer to the remote. The files to be uploaded have already\n been computed and are passed in the ``upload_data`` parameter, so this executor is also\n agnostic about which files are transferred\n \"\"\"\n def __init__(self, app: ConanApp):\n self._app = app\n\n def upload(self, upload_data, remote):\n for ref, bundle in upload_data.refs().items():\n if bundle.get(\"upload\"):\n self.upload_recipe(ref, bundle, remote)\n for pref, prev_bundle in upload_data.prefs(ref, bundle).items():\n if prev_bundle.get(\"upload\"):\n self.upload_package(pref, prev_bundle, remote)\n\n def upload_recipe(self, ref, bundle, remote):\n output = ConanOutput(scope=str(ref))\n output.info(f\"Uploading recipe '{ref.repr_notime()}'\")\n t1 = time.time()\n cache_files = bundle[\"files\"]\n\n self._app.remote_manager.upload_recipe(ref, cache_files, remote)\n\n duration = time.time() - t1\n output.debug(f\"Upload {ref} in {duration} time\")\n return ref\n\n def upload_package(self, pref, prev_bundle, remote):\n output = ConanOutput(scope=str(pref.ref))\n output.info(f\"Uploading package '{pref.repr_notime()}'\")\n cache_files = prev_bundle[\"files\"]\n assert (pref.revision is not None), \"Cannot upload a package without PREV\"\n assert (pref.ref.revision is not None), \"Cannot upload a package without RREV\"\n\n t1 = time.time()\n self._app.remote_manager.upload_package(pref, cache_files, remote)\n duration = time.time() - t1\n output.debug(f\"Upload {pref} in {duration} time\")\n\n\ndef compress_files(files, name, dest_dir, compresslevel=None, ref=None):\n t1 = time.time()\n # FIXME, better write to disk sequentially and not keep tgz contents in memory\n tgz_path = os.path.join(dest_dir, name)\n ConanOutput(scope=str(ref)).info(f\"Compressing {name}\")\n with set_dirty_context_manager(tgz_path), open(tgz_path, \"wb\") as tgz_handle:\n tgz = gzopen_without_timestamps(name, mode=\"w\", 
fileobj=tgz_handle,\n compresslevel=compresslevel)\n for filename, abs_path in sorted(files.items()):\n # recursive is False in case it is a symlink to a folder\n tgz.add(abs_path, filename, recursive=False)\n tgz.close()\n\n duration = time.time() - t1\n ConanOutput().debug(f\"{name} compressed in {duration} time\")\n return tgz_path\n", "path": "conans/client/cmd/uploader.py"}], "after_files": [{"content": "import os\nimport shutil\nimport time\n\nfrom conan.internal.conan_app import ConanApp\nfrom conan.api.output import ConanOutput\nfrom conans.client.source import retrieve_exports_sources\nfrom conans.errors import ConanException, NotFoundException\nfrom conans.paths import (CONAN_MANIFEST, CONANFILE, EXPORT_SOURCES_TGZ_NAME,\n EXPORT_TGZ_NAME, PACKAGE_TGZ_NAME, CONANINFO)\nfrom conans.util.files import (clean_dirty, is_dirty, gather_files,\n gzopen_without_timestamps, set_dirty_context_manager, mkdir,\n human_size)\n\nUPLOAD_POLICY_FORCE = \"force-upload\"\nUPLOAD_POLICY_SKIP = \"skip-upload\"\n\n\nclass UploadUpstreamChecker:\n \"\"\" decides if something needs to be uploaded or force-uploaded checking if that exact\n revision already exists in the remote server, or if the --force parameter is forcing the upload\n This is completely irrespective of the actual package contents, it only uses the local\n computed revision and the remote one\n \"\"\"\n def __init__(self, app: ConanApp):\n self._app = app\n\n def check(self, upload_bundle, remote, force):\n for ref, recipe_bundle in upload_bundle.refs().items():\n self._check_upstream_recipe(ref, recipe_bundle, remote, force)\n for pref, prev_bundle in upload_bundle.prefs(ref, recipe_bundle).items():\n self._check_upstream_package(pref, prev_bundle, remote, force)\n\n def _check_upstream_recipe(self, ref, ref_bundle, remote, force):\n output = ConanOutput(scope=str(ref))\n output.info(\"Checking which revisions exist in the remote server\")\n try:\n assert ref.revision\n # TODO: It is a bit ugly, interface-wise to ask for revisions to check existence\n server_ref = self._app.remote_manager.get_recipe_revision_reference(ref, remote)\n assert server_ref # If successful (not raising NotFoundException), this will exist\n except NotFoundException:\n ref_bundle[\"force_upload\"] = False\n ref_bundle[\"upload\"] = True\n else:\n if force:\n output.info(f\"Recipe '{ref.repr_notime()}' already in server, forcing upload\")\n ref_bundle[\"force_upload\"] = True\n ref_bundle[\"upload\"] = True\n else:\n output.info(f\"Recipe '{ref.repr_notime()}' already in server, skipping upload\")\n ref_bundle[\"upload\"] = False\n ref_bundle[\"force_upload\"] = False\n\n def _check_upstream_package(self, pref, prev_bundle, remote, force):\n assert (pref.revision is not None), \"Cannot upload a package without PREV\"\n assert (pref.ref.revision is not None), \"Cannot upload a package without RREV\"\n\n try:\n # TODO: It is a bit ugly, interface-wise to ask for revisions to check existence\n server_revisions = self._app.remote_manager.get_package_revision_reference(pref, remote)\n assert server_revisions\n except NotFoundException:\n prev_bundle[\"force_upload\"] = False\n prev_bundle[\"upload\"] = True\n else:\n output = ConanOutput(scope=str(pref.ref))\n if force:\n output.info(f\"Package '{pref.repr_notime()}' already in server, forcing upload\")\n prev_bundle[\"force_upload\"] = True\n prev_bundle[\"upload\"] = True\n else:\n output.info(f\"Package '{pref.repr_notime()}' already in server, skipping upload\")\n prev_bundle[\"force_upload\"] = False\n 
prev_bundle[\"upload\"] = False\n\n\nclass PackagePreparator:\n def __init__(self, app: ConanApp, global_conf):\n self._app = app\n self._global_conf = global_conf\n\n def prepare(self, upload_bundle, enabled_remotes):\n for ref, bundle in upload_bundle.refs().items():\n layout = self._app.cache.recipe_layout(ref)\n conanfile_path = layout.conanfile()\n conanfile = self._app.loader.load_basic(conanfile_path)\n\n if bundle.get(\"upload\"):\n self._prepare_recipe(ref, bundle, conanfile, enabled_remotes)\n for pref, prev_bundle in upload_bundle.prefs(ref, bundle).items():\n if prev_bundle.get(\"upload\"):\n self._prepare_package(pref, prev_bundle)\n\n def _prepare_recipe(self, ref, ref_bundle, conanfile, remotes):\n \"\"\" do a bunch of things that are necessary before actually executing the upload:\n - retrieve exports_sources to complete the recipe if necessary\n - compress the artifacts in conan_export.tgz and conan_export_sources.tgz\n \"\"\"\n try:\n recipe_layout = self._app.cache.recipe_layout(ref)\n retrieve_exports_sources(self._app.remote_manager, recipe_layout, conanfile, ref,\n remotes)\n cache_files = self._compress_recipe_files(recipe_layout, ref)\n ref_bundle[\"files\"] = cache_files\n except Exception as e:\n raise ConanException(f\"{ref} Error while compressing: {e}\")\n\n def _compress_recipe_files(self, layout, ref):\n download_export_folder = layout.download_export()\n\n output = ConanOutput(scope=str(ref))\n for f in (EXPORT_TGZ_NAME, EXPORT_SOURCES_TGZ_NAME):\n tgz_path = os.path.join(download_export_folder, f)\n if is_dirty(tgz_path):\n output.warning(\"Removing %s, marked as dirty\" % f)\n os.remove(tgz_path)\n clean_dirty(tgz_path)\n\n export_folder = layout.export()\n files, symlinked_folders = gather_files(export_folder)\n files.update(symlinked_folders)\n if CONANFILE not in files or CONAN_MANIFEST not in files:\n raise ConanException(\"Cannot upload corrupted recipe '%s'\" % str(ref))\n export_src_folder = layout.export_sources()\n src_files, src_symlinked_folders = gather_files(export_src_folder)\n src_files.update(src_symlinked_folders)\n\n # We do a copy of conanfile and conanmanifest to the download_export_folder\n # so it is identical as when it is downloaded, and all files are from the same location\n # to be uploaded\n mkdir(download_export_folder)\n shutil.copy2(os.path.join(export_folder, CONANFILE),\n os.path.join(download_export_folder, CONANFILE))\n shutil.copy2(os.path.join(export_folder, CONAN_MANIFEST),\n os.path.join(download_export_folder, CONAN_MANIFEST))\n result = {CONANFILE: os.path.join(download_export_folder, CONANFILE),\n CONAN_MANIFEST: os.path.join(download_export_folder, CONAN_MANIFEST)}\n # Files NOT included in the tgz\n files.pop(CONANFILE)\n files.pop(CONAN_MANIFEST)\n\n def add_tgz(tgz_name, tgz_files):\n tgz = os.path.join(download_export_folder, tgz_name)\n if os.path.isfile(tgz):\n result[tgz_name] = tgz\n elif tgz_files:\n compresslevel = self._global_conf.get(\"core.gzip:compresslevel\", check_type=int)\n tgz = compress_files(tgz_files, tgz_name, download_export_folder,\n compresslevel=compresslevel, ref=ref)\n result[tgz_name] = tgz\n\n add_tgz(EXPORT_TGZ_NAME, files)\n add_tgz(EXPORT_SOURCES_TGZ_NAME, src_files)\n return result\n\n def _prepare_package(self, pref, prev_bundle):\n pkg_layout = self._app.cache.pkg_layout(pref)\n if pkg_layout.package_is_dirty():\n raise ConanException(f\"Package {pref} is corrupted, aborting upload.\\n\"\n f\"Remove it with 'conan remove {pref}'\")\n cache_files = 
self._compress_package_files(pkg_layout, pref)\n prev_bundle[\"files\"] = cache_files\n\n def _compress_package_files(self, layout, pref):\n output = ConanOutput(scope=str(pref))\n download_pkg_folder = layout.download_package()\n package_tgz = os.path.join(download_pkg_folder, PACKAGE_TGZ_NAME)\n if is_dirty(package_tgz):\n output.warning(\"Removing %s, marked as dirty\" % PACKAGE_TGZ_NAME)\n os.remove(package_tgz)\n clean_dirty(package_tgz)\n\n # Get all the files in that directory\n # existing package, will use short paths if defined\n package_folder = layout.package()\n files, symlinked_folders = gather_files(package_folder)\n files.update(symlinked_folders)\n\n if CONANINFO not in files or CONAN_MANIFEST not in files:\n raise ConanException(\"Cannot upload corrupted package '%s'\" % str(pref))\n\n # Do a copy so the location of CONANINFO and MANIFEST is the \"download\" folder one\n mkdir(download_pkg_folder)\n shutil.copy2(os.path.join(package_folder, CONANINFO),\n os.path.join(download_pkg_folder, CONANINFO))\n shutil.copy2(os.path.join(package_folder, CONAN_MANIFEST),\n os.path.join(download_pkg_folder, CONAN_MANIFEST))\n # Files NOT included in the tgz\n files.pop(CONANINFO)\n files.pop(CONAN_MANIFEST)\n\n if not os.path.isfile(package_tgz):\n tgz_files = {f: path for f, path in files.items()}\n compresslevel = self._global_conf.get(\"core.gzip:compresslevel\", check_type=int)\n tgz_path = compress_files(tgz_files, PACKAGE_TGZ_NAME, download_pkg_folder,\n compresslevel=compresslevel, ref=pref)\n assert tgz_path == package_tgz\n assert os.path.exists(package_tgz)\n\n return {PACKAGE_TGZ_NAME: package_tgz,\n CONANINFO: os.path.join(download_pkg_folder, CONANINFO),\n CONAN_MANIFEST: os.path.join(download_pkg_folder, CONAN_MANIFEST)}\n\n\nclass UploadExecutor:\n \"\"\" does the actual file transfer to the remote. 
The files to be uploaded have already\n been computed and are passed in the ``upload_data`` parameter, so this executor is also\n agnostic about which files are transferred\n \"\"\"\n def __init__(self, app: ConanApp):\n self._app = app\n\n def upload(self, upload_data, remote):\n for ref, bundle in upload_data.refs().items():\n if bundle.get(\"upload\"):\n self.upload_recipe(ref, bundle, remote)\n for pref, prev_bundle in upload_data.prefs(ref, bundle).items():\n if prev_bundle.get(\"upload\"):\n self.upload_package(pref, prev_bundle, remote)\n\n def upload_recipe(self, ref, bundle, remote):\n output = ConanOutput(scope=str(ref))\n cache_files = bundle[\"files\"]\n\n output.info(f\"Uploading recipe '{ref.repr_notime()}' ({_total_size(cache_files)})\")\n\n t1 = time.time()\n self._app.remote_manager.upload_recipe(ref, cache_files, remote)\n\n duration = time.time() - t1\n output.debug(f\"Upload {ref} in {duration} time\")\n return ref\n\n def upload_package(self, pref, prev_bundle, remote):\n output = ConanOutput(scope=str(pref.ref))\n cache_files = prev_bundle[\"files\"]\n assert (pref.revision is not None), \"Cannot upload a package without PREV\"\n assert (pref.ref.revision is not None), \"Cannot upload a package without RREV\"\n\n output.info(f\"Uploading package '{pref.repr_notime()}' ({_total_size(cache_files)})\")\n\n t1 = time.time()\n self._app.remote_manager.upload_package(pref, cache_files, remote)\n duration = time.time() - t1\n output.debug(f\"Upload {pref} in {duration} time\")\n\n\ndef compress_files(files, name, dest_dir, compresslevel=None, ref=None):\n t1 = time.time()\n # FIXME, better write to disk sequentially and not keep tgz contents in memory\n tgz_path = os.path.join(dest_dir, name)\n ConanOutput(scope=str(ref)).info(f\"Compressing {name}\")\n with set_dirty_context_manager(tgz_path), open(tgz_path, \"wb\") as tgz_handle:\n tgz = gzopen_without_timestamps(name, mode=\"w\", fileobj=tgz_handle,\n compresslevel=compresslevel)\n for filename, abs_path in sorted(files.items()):\n # recursive is False in case it is a symlink to a folder\n tgz.add(abs_path, filename, recursive=False)\n tgz.close()\n\n duration = time.time() - t1\n ConanOutput().debug(f\"{name} compressed in {duration} time\")\n return tgz_path\n\n\ndef _total_size(cache_files):\n total_size = 0\n for file in cache_files.values():\n stat = os.stat(file)\n total_size += stat.st_size\n return human_size(total_size)\n", "path": "conans/client/cmd/uploader.py"}]} | 3,813 | 555 |
gh_patches_debug_14944 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1788 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Move getting started guide to opentelemetry.io
For GA there is a request to add at least all of the getting started documentation to the opentelemetry.io website -- for Python this is tracked in https://github.com/open-telemetry/opentelemetry.io/issues/285. Maintaining the documentation in two places is not ideal, so the proposal is to move the getting started documentation there. A similar move already happened for Java.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/getting_started/otlpcollector_example.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # otcollector.py
16 import time
17
18 from opentelemetry import trace
19 from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (
20 OTLPSpanExporter,
21 )
22 from opentelemetry.sdk.trace import TracerProvider
23 from opentelemetry.sdk.trace.export import BatchSpanProcessor
24
25 span_exporter = OTLPSpanExporter(
26 # optional
27 # endpoint:="myCollectorURL:55678",
28 # credentials=ChannelCredentials(credentials),
29 # headers=(("metadata", "metadata")),
30 )
31 tracer_provider = TracerProvider()
32 trace.set_tracer_provider(tracer_provider)
33 span_processor = BatchSpanProcessor(span_exporter)
34 tracer_provider.add_span_processor(span_processor)
35
36 # Configure the tracer to use the collector exporter
37 tracer = trace.get_tracer_provider().get_tracer(__name__)
38
39 with tracer.start_as_current_span("foo"):
40 print("Hello world!")
41
```
Path: `docs/getting_started/tracing_example.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 # tracing.py
16 from opentelemetry import trace
17 from opentelemetry.sdk.trace import TracerProvider
18 from opentelemetry.sdk.trace.export import (
19 ConsoleSpanExporter,
20 SimpleSpanProcessor,
21 )
22
23 trace.set_tracer_provider(TracerProvider())
24 trace.get_tracer_provider().add_span_processor(
25 SimpleSpanProcessor(ConsoleSpanExporter())
26 )
27
28 tracer = trace.get_tracer(__name__)
29
30 with tracer.start_as_current_span("foo"):
31 with tracer.start_as_current_span("bar"):
32 with tracer.start_as_current_span("baz"):
33 print("Hello world from OpenTelemetry Python!")
34
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/getting_started/otlpcollector_example.py b/docs/getting_started/otlpcollector_example.py
--- a/docs/getting_started/otlpcollector_example.py
+++ b/docs/getting_started/otlpcollector_example.py
@@ -24,7 +24,7 @@
span_exporter = OTLPSpanExporter(
# optional
- # endpoint:="myCollectorURL:55678",
+ # endpoint:="myCollectorURL:4317",
# credentials=ChannelCredentials(credentials),
# headers=(("metadata", "metadata")),
)
diff --git a/docs/getting_started/tracing_example.py b/docs/getting_started/tracing_example.py
--- a/docs/getting_started/tracing_example.py
+++ b/docs/getting_started/tracing_example.py
@@ -20,10 +20,11 @@
SimpleSpanProcessor,
)
-trace.set_tracer_provider(TracerProvider())
-trace.get_tracer_provider().add_span_processor(
- SimpleSpanProcessor(ConsoleSpanExporter())
-)
+provider = TracerProvider()
+processor = SimpleSpanProcessor(ConsoleSpanExporter())
+provider.add_span_processor(processor)
+trace.set_tracer_provider(provider)
+
tracer = trace.get_tracer(__name__)
| {"golden_diff": "diff --git a/docs/getting_started/otlpcollector_example.py b/docs/getting_started/otlpcollector_example.py\n--- a/docs/getting_started/otlpcollector_example.py\n+++ b/docs/getting_started/otlpcollector_example.py\n@@ -24,7 +24,7 @@\n \n span_exporter = OTLPSpanExporter(\n # optional\n- # endpoint:=\"myCollectorURL:55678\",\n+ # endpoint:=\"myCollectorURL:4317\",\n # credentials=ChannelCredentials(credentials),\n # headers=((\"metadata\", \"metadata\")),\n )\ndiff --git a/docs/getting_started/tracing_example.py b/docs/getting_started/tracing_example.py\n--- a/docs/getting_started/tracing_example.py\n+++ b/docs/getting_started/tracing_example.py\n@@ -20,10 +20,11 @@\n SimpleSpanProcessor,\n )\n \n-trace.set_tracer_provider(TracerProvider())\n-trace.get_tracer_provider().add_span_processor(\n- SimpleSpanProcessor(ConsoleSpanExporter())\n-)\n+provider = TracerProvider()\n+processor = SimpleSpanProcessor(ConsoleSpanExporter())\n+provider.add_span_processor(processor)\n+trace.set_tracer_provider(provider)\n+\n \n tracer = trace.get_tracer(__name__)\n", "issue": "Move getting started guide to opentelemetry.io\nFor GA there is an ask to add at least all getting started documentation to the opentelemetry.io website -- for Python this is tracked in: https://github.com/open-telemetry/opentelemetry.io/issues/285. Maintaining the documentation in two places is not ideal so proposal is to move getting started documentation. A similar activity already happened for Java.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# otcollector.py\nimport time\n\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (\n OTLPSpanExporter,\n)\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\n\nspan_exporter = OTLPSpanExporter(\n # optional\n # endpoint:=\"myCollectorURL:55678\",\n # credentials=ChannelCredentials(credentials),\n # headers=((\"metadata\", \"metadata\")),\n)\ntracer_provider = TracerProvider()\ntrace.set_tracer_provider(tracer_provider)\nspan_processor = BatchSpanProcessor(span_exporter)\ntracer_provider.add_span_processor(span_processor)\n\n# Configure the tracer to use the collector exporter\ntracer = trace.get_tracer_provider().get_tracer(__name__)\n\nwith tracer.start_as_current_span(\"foo\"):\n print(\"Hello world!\")\n", "path": "docs/getting_started/otlpcollector_example.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the 
specific language governing permissions and\n# limitations under the License.\n\n# tracing.py\nfrom opentelemetry import trace\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n SimpleSpanProcessor,\n)\n\ntrace.set_tracer_provider(TracerProvider())\ntrace.get_tracer_provider().add_span_processor(\n SimpleSpanProcessor(ConsoleSpanExporter())\n)\n\ntracer = trace.get_tracer(__name__)\n\nwith tracer.start_as_current_span(\"foo\"):\n with tracer.start_as_current_span(\"bar\"):\n with tracer.start_as_current_span(\"baz\"):\n print(\"Hello world from OpenTelemetry Python!\")\n", "path": "docs/getting_started/tracing_example.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# otcollector.py\nimport time\n\nfrom opentelemetry import trace\nfrom opentelemetry.exporter.otlp.proto.grpc.trace_exporter import (\n OTLPSpanExporter,\n)\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import BatchSpanProcessor\n\nspan_exporter = OTLPSpanExporter(\n # optional\n # endpoint:=\"myCollectorURL:4317\",\n # credentials=ChannelCredentials(credentials),\n # headers=((\"metadata\", \"metadata\")),\n)\ntracer_provider = TracerProvider()\ntrace.set_tracer_provider(tracer_provider)\nspan_processor = BatchSpanProcessor(span_exporter)\ntracer_provider.add_span_processor(span_processor)\n\n# Configure the tracer to use the collector exporter\ntracer = trace.get_tracer_provider().get_tracer(__name__)\n\nwith tracer.start_as_current_span(\"foo\"):\n print(\"Hello world!\")\n", "path": "docs/getting_started/otlpcollector_example.py"}, {"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# tracing.py\nfrom opentelemetry import trace\nfrom opentelemetry.sdk.trace import TracerProvider\nfrom opentelemetry.sdk.trace.export import (\n ConsoleSpanExporter,\n SimpleSpanProcessor,\n)\n\nprovider = TracerProvider()\nprocessor = SimpleSpanProcessor(ConsoleSpanExporter())\nprovider.add_span_processor(processor)\ntrace.set_tracer_provider(provider)\n\n\ntracer = trace.get_tracer(__name__)\n\nwith tracer.start_as_current_span(\"foo\"):\n with tracer.start_as_current_span(\"bar\"):\n with tracer.start_as_current_span(\"baz\"):\n print(\"Hello world from OpenTelemetry Python!\")\n", "path": "docs/getting_started/tracing_example.py"}]} | 1,065 | 271 |
gh_patches_debug_42548 | rasdani/github-patches | git_diff | deepset-ai__haystack-7841 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
MAP and MRR wrong for multiple gold documents
**Describe the bug**
Both the MAP and the MRR show wrong values. It seems we calculate the score for a single gold document and then overwrite it after each one, instead of calculating it over the whole set of gold documents.
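
For illustration, a minimal sketch of that overwrite pattern (not the actual evaluator code):

```python
# Sketch of the suspected pattern: `score` is reassigned for every gold
# document, so only the last gold document's reciprocal rank survives.
gold_ranks = [1, 2, 3, 4, 7]   # ranks at which the gold docs are retrieved
score = 0.0
for rank in gold_ranks:
    score = 1 / rank           # overwrites the previous value
print(score)                   # 0.142857..., although the true MRR here is 1.0
```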
**Expected behavior**
correct values
**To Reproduce**
```
retrieved_docs = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten"]
gold_docs = ["one", "two", "three","four","seven"]
from haystack.components.evaluators import DocumentMAPEvaluator, DocumentMRREvaluator
from haystack import Document
mapevaluator = DocumentMAPEvaluator()
mrrevaluator = DocumentMRREvaluator()
mapresult = mapevaluator.run(
ground_truth_documents=[[Document(content=content) for content in gold_docs]],
retrieved_documents=[[Document(content=content) for content in retrieved_docs]])
mrrresult = mrrevaluator.run(
ground_truth_documents=[[Document(content=content) for content in gold_docs]],
retrieved_documents=[[Document(content=content) for content in retrieved_docs]])
print(mapresult["individual_scores"])
print(mrrresult["individual_scores"])
print(mapresult["score"])
print(mrrresult["score"])
```
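
With these inputs the standard definitions would give MRR = 1.0 and MAP ≈ 0.943 for this single query; a plain-Python cross-check (exact-match relevance assumed):

```python
retrieved = ["one", "two", "three", "four", "five", "six", "seven", "eight", "nine", "ten"]
gold = {"one", "two", "three", "four", "seven"}

# Reciprocal rank: 1 / rank of the first relevant document.
rr = next(1 / (i + 1) for i, doc in enumerate(retrieved) if doc in gold)

# Average precision: mean of precision@k over the ranks k where a relevant doc appears.
hits, precisions = 0, []
for i, doc in enumerate(retrieved):
    if doc in gold:
        hits += 1
        precisions.append(hits / (i + 1))
ap = sum(precisions) / len(gold)

print(rr)  # 1.0
print(ap)  # 0.9428571428571428
```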
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `haystack/components/evaluators/document_mrr.py`
Content:
```
1 # SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>
2 #
3 # SPDX-License-Identifier: Apache-2.0
4
5 from typing import Any, Dict, List
6
7 from haystack import Document, component
8
9
10 @component
11 class DocumentMRREvaluator:
12 """
13 Evaluator that calculates the mean reciprocal rank of the retrieved documents.
14
15 MRR measures how high the first retrieved document is ranked.
16 Each question can have multiple ground truth documents and multiple retrieved documents.
17
18 `DocumentMRREvaluator` doesn't normalize its inputs, the `DocumentCleaner` component
19 should be used to clean and normalize the documents before passing them to this evaluator.
20
21 Usage example:
22 ```python
23 from haystack import Document
24 from haystack.components.evaluators import DocumentMRREvaluator
25
26 evaluator = DocumentMRREvaluator()
27 result = evaluator.run(
28 ground_truth_documents=[
29 [Document(content="France")],
30 [Document(content="9th century"), Document(content="9th")],
31 ],
32 retrieved_documents=[
33 [Document(content="France")],
34 [Document(content="9th century"), Document(content="10th century"), Document(content="9th")],
35 ],
36 )
37 print(result["individual_scores"])
38 # [1.0, 1.0]
39 print(result["score"])
40 # 1.0
41 ```
42 """
43
44 @component.output_types(score=float, individual_scores=List[float])
45 def run(
46 self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
47 ) -> Dict[str, Any]:
48 """
49 Run the DocumentMRREvaluator on the given inputs.
50
51 `ground_truth_documents` and `retrieved_documents` must have the same length.
52
53 :param ground_truth_documents:
54 A list of expected documents for each question.
55 :param retrieved_documents:
56 A list of retrieved documents for each question.
57 :returns:
58 A dictionary with the following outputs:
59 - `score` - The average of calculated scores.
60 - `individual_scores` - A list of numbers from 0.0 to 1.0 that represents how high the first retrieved document is ranked.
61 """
62 if len(ground_truth_documents) != len(retrieved_documents):
63 msg = "The length of ground_truth_documents and retrieved_documents must be the same."
64 raise ValueError(msg)
65
66 individual_scores = []
67
68 for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):
69 score = 0.0
70 for ground_document in ground_truth:
71 if ground_document.content is None:
72 continue
73
74 for rank, retrieved_document in enumerate(retrieved):
75 if retrieved_document.content is None:
76 continue
77
78 if ground_document.content in retrieved_document.content:
79 score = 1 / (rank + 1)
80 break
81 individual_scores.append(score)
82
83 score = sum(individual_scores) / len(retrieved_documents)
84
85 return {"score": score, "individual_scores": individual_scores}
86
```
Path: `haystack/components/evaluators/document_map.py`
Content:
```
1 # SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>
2 #
3 # SPDX-License-Identifier: Apache-2.0
4
5 from typing import Any, Dict, List
6
7 from haystack import Document, component
8
9
10 @component
11 class DocumentMAPEvaluator:
12 """
13 A Mean Average Precision (MAP) evaluator for documents.
14
15 Evaluator that calculates the mean average precision of the retrieved documents, a metric
16 that measures how high retrieved documents are ranked.
17 Each question can have multiple ground truth documents and multiple retrieved documents.
18
19 `DocumentMAPEvaluator` doesn't normalize its inputs, the `DocumentCleaner` component
20 should be used to clean and normalize the documents before passing them to this evaluator.
21
22 Usage example:
23 ```python
24 from haystack import Document
25 from haystack.components.evaluators import DocumentMAPEvaluator
26
27 evaluator = DocumentMAPEvaluator()
28 result = evaluator.run(
29 ground_truth_documents=[
30 [Document(content="France")],
31 [Document(content="9th century"), Document(content="9th")],
32 ],
33 retrieved_documents=[
34 [Document(content="France")],
35 [Document(content="9th century"), Document(content="10th century"), Document(content="9th")],
36 ],
37 )
38
39 print(result["individual_scores"])
40 # [1.0, 0.8333333333333333]
41 print(result["score"])
42 # 0.9166666666666666
43 ```
44 """
45
46 @component.output_types(score=float, individual_scores=List[float])
47 def run(
48 self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
49 ) -> Dict[str, Any]:
50 """
51 Run the DocumentMAPEvaluator on the given inputs.
52
53 All lists must have the same length.
54
55 :param ground_truth_documents:
56 A list of expected documents for each question.
57 :param retrieved_documents:
58 A list of retrieved documents for each question.
59 :returns:
60 A dictionary with the following outputs:
61 - `score` - The average of calculated scores.
62 - `individual_scores` - A list of numbers from 0.0 to 1.0 that represents how high retrieved documents are ranked.
63 """
64 if len(ground_truth_documents) != len(retrieved_documents):
65 msg = "The length of ground_truth_documents and retrieved_documents must be the same."
66 raise ValueError(msg)
67
68 individual_scores = []
69
70 for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):
71 score = 0.0
72 for ground_document in ground_truth:
73 if ground_document.content is None:
74 continue
75
76 average_precision = 0.0
77 relevant_documents = 0
78
79 for rank, retrieved_document in enumerate(retrieved):
80 if retrieved_document.content is None:
81 continue
82
83 if ground_document.content in retrieved_document.content:
84 relevant_documents += 1
85 average_precision += relevant_documents / (rank + 1)
86 if relevant_documents > 0:
87 score = average_precision / relevant_documents
88 individual_scores.append(score)
89
90 score = sum(individual_scores) / len(retrieved_documents)
91
92 return {"score": score, "individual_scores": individual_scores}
93
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/haystack/components/evaluators/document_map.py b/haystack/components/evaluators/document_map.py
--- a/haystack/components/evaluators/document_map.py
+++ b/haystack/components/evaluators/document_map.py
@@ -43,6 +43,7 @@
```
"""
+ # Refer to https://www.pinecone.io/learn/offline-evaluation/ for the algorithm.
@component.output_types(score=float, individual_scores=List[float])
def run(
self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
@@ -68,25 +69,21 @@
individual_scores = []
for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):
- score = 0.0
- for ground_document in ground_truth:
- if ground_document.content is None:
- continue
-
- average_precision = 0.0
- relevant_documents = 0
+ average_precision = 0.0
+ average_precision_numerator = 0.0
+ relevant_documents = 0
- for rank, retrieved_document in enumerate(retrieved):
- if retrieved_document.content is None:
- continue
-
- if ground_document.content in retrieved_document.content:
- relevant_documents += 1
- average_precision += relevant_documents / (rank + 1)
- if relevant_documents > 0:
- score = average_precision / relevant_documents
- individual_scores.append(score)
+ ground_truth_contents = [doc.content for doc in ground_truth if doc.content is not None]
+ for rank, retrieved_document in enumerate(retrieved):
+ if retrieved_document.content is None:
+ continue
- score = sum(individual_scores) / len(retrieved_documents)
+ if retrieved_document.content in ground_truth_contents:
+ relevant_documents += 1
+ average_precision_numerator += relevant_documents / (rank + 1)
+ if relevant_documents > 0:
+ average_precision = average_precision_numerator / relevant_documents
+ individual_scores.append(average_precision)
+ score = sum(individual_scores) / len(ground_truth_documents)
return {"score": score, "individual_scores": individual_scores}
diff --git a/haystack/components/evaluators/document_mrr.py b/haystack/components/evaluators/document_mrr.py
--- a/haystack/components/evaluators/document_mrr.py
+++ b/haystack/components/evaluators/document_mrr.py
@@ -41,6 +41,7 @@
```
"""
+ # Refer to https://www.pinecone.io/learn/offline-evaluation/ for the algorithm.
@component.output_types(score=float, individual_scores=List[float])
def run(
self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]
@@ -66,20 +67,17 @@
individual_scores = []
for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):
- score = 0.0
- for ground_document in ground_truth:
- if ground_document.content is None:
- continue
-
- for rank, retrieved_document in enumerate(retrieved):
- if retrieved_document.content is None:
- continue
+ reciprocal_rank = 0.0
- if ground_document.content in retrieved_document.content:
- score = 1 / (rank + 1)
- break
- individual_scores.append(score)
+ ground_truth_contents = [doc.content for doc in ground_truth if doc.content is not None]
+ for rank, retrieved_document in enumerate(retrieved):
+ if retrieved_document.content is None:
+ continue
+ if retrieved_document.content in ground_truth_contents:
+ reciprocal_rank = 1 / (rank + 1)
+ break
+ individual_scores.append(reciprocal_rank)
- score = sum(individual_scores) / len(retrieved_documents)
+ score = sum(individual_scores) / len(ground_truth_documents)
return {"score": score, "individual_scores": individual_scores}
| {"golden_diff": "diff --git a/haystack/components/evaluators/document_map.py b/haystack/components/evaluators/document_map.py\n--- a/haystack/components/evaluators/document_map.py\n+++ b/haystack/components/evaluators/document_map.py\n@@ -43,6 +43,7 @@\n ```\n \"\"\"\n \n+ # Refer to https://www.pinecone.io/learn/offline-evaluation/ for the algorithm.\n @component.output_types(score=float, individual_scores=List[float])\n def run(\n self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]\n@@ -68,25 +69,21 @@\n individual_scores = []\n \n for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):\n- score = 0.0\n- for ground_document in ground_truth:\n- if ground_document.content is None:\n- continue\n-\n- average_precision = 0.0\n- relevant_documents = 0\n+ average_precision = 0.0\n+ average_precision_numerator = 0.0\n+ relevant_documents = 0\n \n- for rank, retrieved_document in enumerate(retrieved):\n- if retrieved_document.content is None:\n- continue\n-\n- if ground_document.content in retrieved_document.content:\n- relevant_documents += 1\n- average_precision += relevant_documents / (rank + 1)\n- if relevant_documents > 0:\n- score = average_precision / relevant_documents\n- individual_scores.append(score)\n+ ground_truth_contents = [doc.content for doc in ground_truth if doc.content is not None]\n+ for rank, retrieved_document in enumerate(retrieved):\n+ if retrieved_document.content is None:\n+ continue\n \n- score = sum(individual_scores) / len(retrieved_documents)\n+ if retrieved_document.content in ground_truth_contents:\n+ relevant_documents += 1\n+ average_precision_numerator += relevant_documents / (rank + 1)\n+ if relevant_documents > 0:\n+ average_precision = average_precision_numerator / relevant_documents\n+ individual_scores.append(average_precision)\n \n+ score = sum(individual_scores) / len(ground_truth_documents)\n return {\"score\": score, \"individual_scores\": individual_scores}\ndiff --git a/haystack/components/evaluators/document_mrr.py b/haystack/components/evaluators/document_mrr.py\n--- a/haystack/components/evaluators/document_mrr.py\n+++ b/haystack/components/evaluators/document_mrr.py\n@@ -41,6 +41,7 @@\n ```\n \"\"\"\n \n+ # Refer to https://www.pinecone.io/learn/offline-evaluation/ for the algorithm.\n @component.output_types(score=float, individual_scores=List[float])\n def run(\n self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]\n@@ -66,20 +67,17 @@\n individual_scores = []\n \n for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):\n- score = 0.0\n- for ground_document in ground_truth:\n- if ground_document.content is None:\n- continue\n-\n- for rank, retrieved_document in enumerate(retrieved):\n- if retrieved_document.content is None:\n- continue\n+ reciprocal_rank = 0.0\n \n- if ground_document.content in retrieved_document.content:\n- score = 1 / (rank + 1)\n- break\n- individual_scores.append(score)\n+ ground_truth_contents = [doc.content for doc in ground_truth if doc.content is not None]\n+ for rank, retrieved_document in enumerate(retrieved):\n+ if retrieved_document.content is None:\n+ continue\n+ if retrieved_document.content in ground_truth_contents:\n+ reciprocal_rank = 1 / (rank + 1)\n+ break\n+ individual_scores.append(reciprocal_rank)\n \n- score = sum(individual_scores) / len(retrieved_documents)\n+ score = sum(individual_scores) / len(ground_truth_documents)\n \n return {\"score\": score, 
\"individual_scores\": individual_scores}\n", "issue": "MAP and MRR wrong for multiple gold documents\n**Describe the bug**\r\nBoth the MAP and the MRR show wrong values. It seems we calculate the score for single gold documents and then override it after each instead of calculating it for the whole batch of gold documents. \r\n\r\n**Expected behavior**\r\ncorrect values\r\n\r\n**To Reproduce**\r\n```\r\nretrieved_docs = [\"one\", \"two\", \"three\", \"four\", \"five\", \"six\", \"seven\", \"eight\", \"nine\", \"ten\"]\r\ngold_docs = [\"one\", \"two\", \"three\",\"four\",\"seven\"]\r\n\r\nfrom haystack.components.evaluators import DocumentMAPEvaluator, DocumentMRREvaluator\r\nfrom haystack import Document\r\n\r\nmapevaluator = DocumentMAPEvaluator()\r\nmrrevaluator = DocumentMRREvaluator()\r\nmapresult = mapevaluator.run(\r\n ground_truth_documents=[[Document(content=content) for content in gold_docs]],\r\n retrieved_documents=[[Document(content=content) for content in retrieved_docs]])\r\n\r\nmrrresult = mrrevaluator.run(\r\n ground_truth_documents=[[Document(content=content) for content in gold_docs]],\r\n retrieved_documents=[[Document(content=content) for content in retrieved_docs]])\r\n\r\nprint(mapresult[\"individual_scores\"])\r\nprint(mrrresult[\"individual_scores\"])\r\nprint(mapresult[\"score\"])\r\nprint(mrrresult[\"score\"])\r\n```\r\n\n", "before_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom typing import Any, Dict, List\n\nfrom haystack import Document, component\n\n\n@component\nclass DocumentMRREvaluator:\n \"\"\"\n Evaluator that calculates the mean reciprocal rank of the retrieved documents.\n\n MRR measures how high the first retrieved document is ranked.\n Each question can have multiple ground truth documents and multiple retrieved documents.\n\n `DocumentMRREvaluator` doesn't normalize its inputs, the `DocumentCleaner` component\n should be used to clean and normalize the documents before passing them to this evaluator.\n\n Usage example:\n ```python\n from haystack import Document\n from haystack.components.evaluators import DocumentMRREvaluator\n\n evaluator = DocumentMRREvaluator()\n result = evaluator.run(\n ground_truth_documents=[\n [Document(content=\"France\")],\n [Document(content=\"9th century\"), Document(content=\"9th\")],\n ],\n retrieved_documents=[\n [Document(content=\"France\")],\n [Document(content=\"9th century\"), Document(content=\"10th century\"), Document(content=\"9th\")],\n ],\n )\n print(result[\"individual_scores\"])\n # [1.0, 1.0]\n print(result[\"score\"])\n # 1.0\n ```\n \"\"\"\n\n @component.output_types(score=float, individual_scores=List[float])\n def run(\n self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]\n ) -> Dict[str, Any]:\n \"\"\"\n Run the DocumentMRREvaluator on the given inputs.\n\n `ground_truth_documents` and `retrieved_documents` must have the same length.\n\n :param ground_truth_documents:\n A list of expected documents for each question.\n :param retrieved_documents:\n A list of retrieved documents for each question.\n :returns:\n A dictionary with the following outputs:\n - `score` - The average of calculated scores.\n - `individual_scores` - A list of numbers from 0.0 to 1.0 that represents how high the first retrieved document is ranked.\n \"\"\"\n if len(ground_truth_documents) != len(retrieved_documents):\n msg = \"The length of ground_truth_documents and retrieved_documents 
must be the same.\"\n raise ValueError(msg)\n\n individual_scores = []\n\n for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):\n score = 0.0\n for ground_document in ground_truth:\n if ground_document.content is None:\n continue\n\n for rank, retrieved_document in enumerate(retrieved):\n if retrieved_document.content is None:\n continue\n\n if ground_document.content in retrieved_document.content:\n score = 1 / (rank + 1)\n break\n individual_scores.append(score)\n\n score = sum(individual_scores) / len(retrieved_documents)\n\n return {\"score\": score, \"individual_scores\": individual_scores}\n", "path": "haystack/components/evaluators/document_mrr.py"}, {"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom typing import Any, Dict, List\n\nfrom haystack import Document, component\n\n\n@component\nclass DocumentMAPEvaluator:\n \"\"\"\n A Mean Average Precision (MAP) evaluator for documents.\n\n Evaluator that calculates the mean average precision of the retrieved documents, a metric\n that measures how high retrieved documents are ranked.\n Each question can have multiple ground truth documents and multiple retrieved documents.\n\n `DocumentMAPEvaluator` doesn't normalize its inputs, the `DocumentCleaner` component\n should be used to clean and normalize the documents before passing them to this evaluator.\n\n Usage example:\n ```python\n from haystack import Document\n from haystack.components.evaluators import DocumentMAPEvaluator\n\n evaluator = DocumentMAPEvaluator()\n result = evaluator.run(\n ground_truth_documents=[\n [Document(content=\"France\")],\n [Document(content=\"9th century\"), Document(content=\"9th\")],\n ],\n retrieved_documents=[\n [Document(content=\"France\")],\n [Document(content=\"9th century\"), Document(content=\"10th century\"), Document(content=\"9th\")],\n ],\n )\n\n print(result[\"individual_scores\"])\n # [1.0, 0.8333333333333333]\n print(result[\"score\"])\n # 0.9166666666666666\n ```\n \"\"\"\n\n @component.output_types(score=float, individual_scores=List[float])\n def run(\n self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]\n ) -> Dict[str, Any]:\n \"\"\"\n Run the DocumentMAPEvaluator on the given inputs.\n\n All lists must have the same length.\n\n :param ground_truth_documents:\n A list of expected documents for each question.\n :param retrieved_documents:\n A list of retrieved documents for each question.\n :returns:\n A dictionary with the following outputs:\n - `score` - The average of calculated scores.\n - `individual_scores` - A list of numbers from 0.0 to 1.0 that represents how high retrieved documents are ranked.\n \"\"\"\n if len(ground_truth_documents) != len(retrieved_documents):\n msg = \"The length of ground_truth_documents and retrieved_documents must be the same.\"\n raise ValueError(msg)\n\n individual_scores = []\n\n for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):\n score = 0.0\n for ground_document in ground_truth:\n if ground_document.content is None:\n continue\n\n average_precision = 0.0\n relevant_documents = 0\n\n for rank, retrieved_document in enumerate(retrieved):\n if retrieved_document.content is None:\n continue\n\n if ground_document.content in retrieved_document.content:\n relevant_documents += 1\n average_precision += relevant_documents / (rank + 1)\n if relevant_documents > 0:\n score = average_precision / relevant_documents\n 
individual_scores.append(score)\n\n score = sum(individual_scores) / len(retrieved_documents)\n\n return {\"score\": score, \"individual_scores\": individual_scores}\n", "path": "haystack/components/evaluators/document_map.py"}], "after_files": [{"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom typing import Any, Dict, List\n\nfrom haystack import Document, component\n\n\n@component\nclass DocumentMRREvaluator:\n \"\"\"\n Evaluator that calculates the mean reciprocal rank of the retrieved documents.\n\n MRR measures how high the first retrieved document is ranked.\n Each question can have multiple ground truth documents and multiple retrieved documents.\n\n `DocumentMRREvaluator` doesn't normalize its inputs, the `DocumentCleaner` component\n should be used to clean and normalize the documents before passing them to this evaluator.\n\n Usage example:\n ```python\n from haystack import Document\n from haystack.components.evaluators import DocumentMRREvaluator\n\n evaluator = DocumentMRREvaluator()\n result = evaluator.run(\n ground_truth_documents=[\n [Document(content=\"France\")],\n [Document(content=\"9th century\"), Document(content=\"9th\")],\n ],\n retrieved_documents=[\n [Document(content=\"France\")],\n [Document(content=\"9th century\"), Document(content=\"10th century\"), Document(content=\"9th\")],\n ],\n )\n print(result[\"individual_scores\"])\n # [1.0, 1.0]\n print(result[\"score\"])\n # 1.0\n ```\n \"\"\"\n\n # Refer to https://www.pinecone.io/learn/offline-evaluation/ for the algorithm.\n @component.output_types(score=float, individual_scores=List[float])\n def run(\n self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]\n ) -> Dict[str, Any]:\n \"\"\"\n Run the DocumentMRREvaluator on the given inputs.\n\n `ground_truth_documents` and `retrieved_documents` must have the same length.\n\n :param ground_truth_documents:\n A list of expected documents for each question.\n :param retrieved_documents:\n A list of retrieved documents for each question.\n :returns:\n A dictionary with the following outputs:\n - `score` - The average of calculated scores.\n - `individual_scores` - A list of numbers from 0.0 to 1.0 that represents how high the first retrieved document is ranked.\n \"\"\"\n if len(ground_truth_documents) != len(retrieved_documents):\n msg = \"The length of ground_truth_documents and retrieved_documents must be the same.\"\n raise ValueError(msg)\n\n individual_scores = []\n\n for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):\n reciprocal_rank = 0.0\n\n ground_truth_contents = [doc.content for doc in ground_truth if doc.content is not None]\n for rank, retrieved_document in enumerate(retrieved):\n if retrieved_document.content is None:\n continue\n if retrieved_document.content in ground_truth_contents:\n reciprocal_rank = 1 / (rank + 1)\n break\n individual_scores.append(reciprocal_rank)\n\n score = sum(individual_scores) / len(ground_truth_documents)\n\n return {\"score\": score, \"individual_scores\": individual_scores}\n", "path": "haystack/components/evaluators/document_mrr.py"}, {"content": "# SPDX-FileCopyrightText: 2022-present deepset GmbH <[email protected]>\n#\n# SPDX-License-Identifier: Apache-2.0\n\nfrom typing import Any, Dict, List\n\nfrom haystack import Document, component\n\n\n@component\nclass DocumentMAPEvaluator:\n \"\"\"\n A Mean Average Precision (MAP) evaluator for documents.\n\n Evaluator 
that calculates the mean average precision of the retrieved documents, a metric\n that measures how high retrieved documents are ranked.\n Each question can have multiple ground truth documents and multiple retrieved documents.\n\n `DocumentMAPEvaluator` doesn't normalize its inputs, the `DocumentCleaner` component\n should be used to clean and normalize the documents before passing them to this evaluator.\n\n Usage example:\n ```python\n from haystack import Document\n from haystack.components.evaluators import DocumentMAPEvaluator\n\n evaluator = DocumentMAPEvaluator()\n result = evaluator.run(\n ground_truth_documents=[\n [Document(content=\"France\")],\n [Document(content=\"9th century\"), Document(content=\"9th\")],\n ],\n retrieved_documents=[\n [Document(content=\"France\")],\n [Document(content=\"9th century\"), Document(content=\"10th century\"), Document(content=\"9th\")],\n ],\n )\n\n print(result[\"individual_scores\"])\n # [1.0, 0.8333333333333333]\n print(result[\"score\"])\n # 0.9166666666666666\n ```\n \"\"\"\n\n # Refer to https://www.pinecone.io/learn/offline-evaluation/ for the algorithm.\n @component.output_types(score=float, individual_scores=List[float])\n def run(\n self, ground_truth_documents: List[List[Document]], retrieved_documents: List[List[Document]]\n ) -> Dict[str, Any]:\n \"\"\"\n Run the DocumentMAPEvaluator on the given inputs.\n\n All lists must have the same length.\n\n :param ground_truth_documents:\n A list of expected documents for each question.\n :param retrieved_documents:\n A list of retrieved documents for each question.\n :returns:\n A dictionary with the following outputs:\n - `score` - The average of calculated scores.\n - `individual_scores` - A list of numbers from 0.0 to 1.0 that represents how high retrieved documents are ranked.\n \"\"\"\n if len(ground_truth_documents) != len(retrieved_documents):\n msg = \"The length of ground_truth_documents and retrieved_documents must be the same.\"\n raise ValueError(msg)\n\n individual_scores = []\n\n for ground_truth, retrieved in zip(ground_truth_documents, retrieved_documents):\n average_precision = 0.0\n average_precision_numerator = 0.0\n relevant_documents = 0\n\n ground_truth_contents = [doc.content for doc in ground_truth if doc.content is not None]\n for rank, retrieved_document in enumerate(retrieved):\n if retrieved_document.content is None:\n continue\n\n if retrieved_document.content in ground_truth_contents:\n relevant_documents += 1\n average_precision_numerator += relevant_documents / (rank + 1)\n if relevant_documents > 0:\n average_precision = average_precision_numerator / relevant_documents\n individual_scores.append(average_precision)\n\n score = sum(individual_scores) / len(ground_truth_documents)\n return {\"score\": score, \"individual_scores\": individual_scores}\n", "path": "haystack/components/evaluators/document_map.py"}]} | 2,261 | 907 |
gh_patches_debug_2272 | rasdani/github-patches | git_diff | python-pillow__Pillow-6481 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PSD incorrectly loaded
### What did you do?
I opened the TIFF in Pillow and converted it to JPG.
### What did you expect to happen?
The JPG image to look the same as the original TIFF.
### What actually happened?
The converted JPG looks malformed and has messed up colors.
### What are your OS, Python and Pillow versions?
* OS: Linux
* Python: 3.10.5
* Pillow: 9.1.1 (also tested -git)
```python
>>> img = Image.open("3662b8bd397337482862ab1a06bf3366-OA_535_161_17_F_TE.tif")
>>> out_img = img.convert("RGB")
>>> out_img.save("converted.jpg", quality=95)
```
[original image](https://api.collectie.gent/storage/v1/download/3662b8bd397337482862ab1a06bf3366-OA_535_161_17_F_TE.tif) (beware, 274MB)
[converted image](https://api.collectie.gent/storage/v1/download/3a029a4f48b480211286486a6a1f0f0b-transcode-OA_535_161_17_F_TE.jpg)
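
A quick way to narrow this down is to look at how Pillow actually decodes the file before any conversion happens (rough diagnostic sketch; the printed values obviously depend on the file):

```python
from PIL import Image

img = Image.open("3662b8bd397337482862ab1a06bf3366-OA_535_161_17_F_TE.tif")
print(img.format)           # which plugin claimed the file
print(img.mode, img.size)   # decoded mode and dimensions
print(img.getbands())       # the channel names Pillow thinks the image has
```

If the reported mode or band count does not match what the original file really contains (for example an extra channel being folded into RGB), that would point at the decoder rather than at the JPEG save.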
Is it okay to report this here or should I report this to the appropriate library (libtiff, jpeg-turbo, ?)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/PIL/PsdImagePlugin.py`
Content:
```
1 #
2 # The Python Imaging Library
3 # $Id$
4 #
5 # Adobe PSD 2.5/3.0 file handling
6 #
7 # History:
8 # 1995-09-01 fl Created
9 # 1997-01-03 fl Read most PSD images
10 # 1997-01-18 fl Fixed P and CMYK support
11 # 2001-10-21 fl Added seek/tell support (for layers)
12 #
13 # Copyright (c) 1997-2001 by Secret Labs AB.
14 # Copyright (c) 1995-2001 by Fredrik Lundh
15 #
16 # See the README file for information on usage and redistribution.
17 #
18
19 import io
20
21 from . import Image, ImageFile, ImagePalette
22 from ._binary import i8
23 from ._binary import i16be as i16
24 from ._binary import i32be as i32
25 from ._binary import si16be as si16
26
27 MODES = {
28 # (photoshop mode, bits) -> (pil mode, required channels)
29 (0, 1): ("1", 1),
30 (0, 8): ("L", 1),
31 (1, 8): ("L", 1),
32 (2, 8): ("P", 1),
33 (3, 8): ("RGB", 3),
34 (4, 8): ("CMYK", 4),
35 (7, 8): ("L", 1), # FIXME: multilayer
36 (8, 8): ("L", 1), # duotone
37 (9, 8): ("LAB", 3),
38 }
39
40
41 # --------------------------------------------------------------------.
42 # read PSD images
43
44
45 def _accept(prefix):
46 return prefix[:4] == b"8BPS"
47
48
49 ##
50 # Image plugin for Photoshop images.
51
52
53 class PsdImageFile(ImageFile.ImageFile):
54
55 format = "PSD"
56 format_description = "Adobe Photoshop"
57 _close_exclusive_fp_after_loading = False
58
59 def _open(self):
60
61 read = self.fp.read
62
63 #
64 # header
65
66 s = read(26)
67 if not _accept(s) or i16(s, 4) != 1:
68 raise SyntaxError("not a PSD file")
69
70 psd_bits = i16(s, 22)
71 psd_channels = i16(s, 12)
72 psd_mode = i16(s, 24)
73
74 mode, channels = MODES[(psd_mode, psd_bits)]
75
76 if channels > psd_channels:
77 raise OSError("not enough channels")
78
79 self.mode = mode
80 self._size = i32(s, 18), i32(s, 14)
81
82 #
83 # color mode data
84
85 size = i32(read(4))
86 if size:
87 data = read(size)
88 if mode == "P" and size == 768:
89 self.palette = ImagePalette.raw("RGB;L", data)
90
91 #
92 # image resources
93
94 self.resources = []
95
96 size = i32(read(4))
97 if size:
98 # load resources
99 end = self.fp.tell() + size
100 while self.fp.tell() < end:
101 read(4) # signature
102 id = i16(read(2))
103 name = read(i8(read(1)))
104 if not (len(name) & 1):
105 read(1) # padding
106 data = read(i32(read(4)))
107 if len(data) & 1:
108 read(1) # padding
109 self.resources.append((id, name, data))
110 if id == 1039: # ICC profile
111 self.info["icc_profile"] = data
112
113 #
114 # layer and mask information
115
116 self.layers = []
117
118 size = i32(read(4))
119 if size:
120 end = self.fp.tell() + size
121 size = i32(read(4))
122 if size:
123 _layer_data = io.BytesIO(ImageFile._safe_read(self.fp, size))
124 self.layers = _layerinfo(_layer_data, size)
125 self.fp.seek(end)
126 self.n_frames = len(self.layers)
127 self.is_animated = self.n_frames > 1
128
129 #
130 # image descriptor
131
132 self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)
133
134 # keep the file open
135 self._fp = self.fp
136 self.frame = 1
137 self._min_frame = 1
138
139 def seek(self, layer):
140 if not self._seek_check(layer):
141 return
142
143 # seek to given layer (1..max)
144 try:
145 name, mode, bbox, tile = self.layers[layer - 1]
146 self.mode = mode
147 self.tile = tile
148 self.frame = layer
149 self.fp = self._fp
150 return name, bbox
151 except IndexError as e:
152 raise EOFError("no such layer") from e
153
154 def tell(self):
155 # return layer number (0=image, 1..max=layers)
156 return self.frame
157
158
159 def _layerinfo(fp, ct_bytes):
160 # read layerinfo block
161 layers = []
162
163 def read(size):
164 return ImageFile._safe_read(fp, size)
165
166 ct = si16(read(2))
167
168 # sanity check
169 if ct_bytes < (abs(ct) * 20):
170 raise SyntaxError("Layer block too short for number of layers requested")
171
172 for _ in range(abs(ct)):
173
174 # bounding box
175 y0 = i32(read(4))
176 x0 = i32(read(4))
177 y1 = i32(read(4))
178 x1 = i32(read(4))
179
180 # image info
181 mode = []
182 ct_types = i16(read(2))
183 types = list(range(ct_types))
184 if len(types) > 4:
185 continue
186
187 for _ in types:
188 type = i16(read(2))
189
190 if type == 65535:
191 m = "A"
192 else:
193 m = "RGBA"[type]
194
195 mode.append(m)
196 read(4) # size
197
198 # figure out the image mode
199 mode.sort()
200 if mode == ["R"]:
201 mode = "L"
202 elif mode == ["B", "G", "R"]:
203 mode = "RGB"
204 elif mode == ["A", "B", "G", "R"]:
205 mode = "RGBA"
206 else:
207 mode = None # unknown
208
209 # skip over blend flags and extra information
210 read(12) # filler
211 name = ""
212 size = i32(read(4)) # length of the extra data field
213 if size:
214 data_end = fp.tell() + size
215
216 length = i32(read(4))
217 if length:
218 fp.seek(length - 16, io.SEEK_CUR)
219
220 length = i32(read(4))
221 if length:
222 fp.seek(length, io.SEEK_CUR)
223
224 length = i8(read(1))
225 if length:
226 # Don't know the proper encoding,
227 # Latin-1 should be a good guess
228 name = read(length).decode("latin-1", "replace")
229
230 fp.seek(data_end)
231 layers.append((name, mode, (x0, y0, x1, y1)))
232
233 # get tiles
234 i = 0
235 for name, mode, bbox in layers:
236 tile = []
237 for m in mode:
238 t = _maketile(fp, m, bbox, 1)
239 if t:
240 tile.extend(t)
241 layers[i] = name, mode, bbox, tile
242 i += 1
243
244 return layers
245
246
247 def _maketile(file, mode, bbox, channels):
248
249 tile = None
250 read = file.read
251
252 compression = i16(read(2))
253
254 xsize = bbox[2] - bbox[0]
255 ysize = bbox[3] - bbox[1]
256
257 offset = file.tell()
258
259 if compression == 0:
260 #
261 # raw compression
262 tile = []
263 for channel in range(channels):
264 layer = mode[channel]
265 if mode == "CMYK":
266 layer += ";I"
267 tile.append(("raw", bbox, offset, layer))
268 offset = offset + xsize * ysize
269
270 elif compression == 1:
271 #
272 # packbits compression
273 i = 0
274 tile = []
275 bytecount = read(channels * ysize * 2)
276 offset = file.tell()
277 for channel in range(channels):
278 layer = mode[channel]
279 if mode == "CMYK":
280 layer += ";I"
281 tile.append(("packbits", bbox, offset, layer))
282 for y in range(ysize):
283 offset = offset + i16(bytecount, i)
284 i += 2
285
286 file.seek(offset)
287
288 if offset & 1:
289 read(1) # padding
290
291 return tile
292
293
294 # --------------------------------------------------------------------
295 # registry
296
297
298 Image.register_open(PsdImageFile.format, PsdImageFile, _accept)
299
300 Image.register_extension(PsdImageFile.format, ".psd")
301
302 Image.register_mime(PsdImageFile.format, "image/vnd.adobe.photoshop")
303
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/PIL/PsdImagePlugin.py b/src/PIL/PsdImagePlugin.py
--- a/src/PIL/PsdImagePlugin.py
+++ b/src/PIL/PsdImagePlugin.py
@@ -75,6 +75,9 @@
if channels > psd_channels:
raise OSError("not enough channels")
+ if mode == "RGB" and psd_channels == 4:
+ mode = "RGBA"
+ channels = 4
self.mode = mode
self._size = i32(s, 18), i32(s, 14)
| {"golden_diff": "diff --git a/src/PIL/PsdImagePlugin.py b/src/PIL/PsdImagePlugin.py\n--- a/src/PIL/PsdImagePlugin.py\n+++ b/src/PIL/PsdImagePlugin.py\n@@ -75,6 +75,9 @@\n \n if channels > psd_channels:\n raise OSError(\"not enough channels\")\n+ if mode == \"RGB\" and psd_channels == 4:\n+ mode = \"RGBA\"\n+ channels = 4\n \n self.mode = mode\n self._size = i32(s, 18), i32(s, 14)\n", "issue": "PSD incorrectly loaded\n### What did you do?\r\nI opened the TIFF in Pillow and converted it to JPG.\r\n### What did you expect to happen?\r\nThe JPG image to look the same as the original TIFF.\r\n### What actually happened?\r\nThe converted JPG looks malformed and has messed up colors.\r\n### What are your OS, Python and Pillow versions?\r\n\r\n* OS: Linux\r\n* Python: 3.10.5\r\n* Pillow: 9.1.1 (also tested -git)\r\n\r\n```python\r\n>>> img = Image.open(\"3662b8bd397337482862ab1a06bf3366-OA_535_161_17_F_TE.tif\")\r\n>>> out_img = img.convert(\"RGB\")\r\n>>> out_img.save(\"converted.jpg\", quality=95)\r\n```\r\n[original image](https://api.collectie.gent/storage/v1/download/3662b8bd397337482862ab1a06bf3366-OA_535_161_17_F_TE.tif) (beware, 274MB)\r\n[converted image](https://api.collectie.gent/storage/v1/download/3a029a4f48b480211286486a6a1f0f0b-transcode-OA_535_161_17_F_TE.jpg)\r\n\r\nIs it okay to report this here or should I report this to the appropriate library (libtiff, jpeg-turbo, ?)\n", "before_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# Adobe PSD 2.5/3.0 file handling\n#\n# History:\n# 1995-09-01 fl Created\n# 1997-01-03 fl Read most PSD images\n# 1997-01-18 fl Fixed P and CMYK support\n# 2001-10-21 fl Added seek/tell support (for layers)\n#\n# Copyright (c) 1997-2001 by Secret Labs AB.\n# Copyright (c) 1995-2001 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport io\n\nfrom . 
import Image, ImageFile, ImagePalette\nfrom ._binary import i8\nfrom ._binary import i16be as i16\nfrom ._binary import i32be as i32\nfrom ._binary import si16be as si16\n\nMODES = {\n # (photoshop mode, bits) -> (pil mode, required channels)\n (0, 1): (\"1\", 1),\n (0, 8): (\"L\", 1),\n (1, 8): (\"L\", 1),\n (2, 8): (\"P\", 1),\n (3, 8): (\"RGB\", 3),\n (4, 8): (\"CMYK\", 4),\n (7, 8): (\"L\", 1), # FIXME: multilayer\n (8, 8): (\"L\", 1), # duotone\n (9, 8): (\"LAB\", 3),\n}\n\n\n# --------------------------------------------------------------------.\n# read PSD images\n\n\ndef _accept(prefix):\n return prefix[:4] == b\"8BPS\"\n\n\n##\n# Image plugin for Photoshop images.\n\n\nclass PsdImageFile(ImageFile.ImageFile):\n\n format = \"PSD\"\n format_description = \"Adobe Photoshop\"\n _close_exclusive_fp_after_loading = False\n\n def _open(self):\n\n read = self.fp.read\n\n #\n # header\n\n s = read(26)\n if not _accept(s) or i16(s, 4) != 1:\n raise SyntaxError(\"not a PSD file\")\n\n psd_bits = i16(s, 22)\n psd_channels = i16(s, 12)\n psd_mode = i16(s, 24)\n\n mode, channels = MODES[(psd_mode, psd_bits)]\n\n if channels > psd_channels:\n raise OSError(\"not enough channels\")\n\n self.mode = mode\n self._size = i32(s, 18), i32(s, 14)\n\n #\n # color mode data\n\n size = i32(read(4))\n if size:\n data = read(size)\n if mode == \"P\" and size == 768:\n self.palette = ImagePalette.raw(\"RGB;L\", data)\n\n #\n # image resources\n\n self.resources = []\n\n size = i32(read(4))\n if size:\n # load resources\n end = self.fp.tell() + size\n while self.fp.tell() < end:\n read(4) # signature\n id = i16(read(2))\n name = read(i8(read(1)))\n if not (len(name) & 1):\n read(1) # padding\n data = read(i32(read(4)))\n if len(data) & 1:\n read(1) # padding\n self.resources.append((id, name, data))\n if id == 1039: # ICC profile\n self.info[\"icc_profile\"] = data\n\n #\n # layer and mask information\n\n self.layers = []\n\n size = i32(read(4))\n if size:\n end = self.fp.tell() + size\n size = i32(read(4))\n if size:\n _layer_data = io.BytesIO(ImageFile._safe_read(self.fp, size))\n self.layers = _layerinfo(_layer_data, size)\n self.fp.seek(end)\n self.n_frames = len(self.layers)\n self.is_animated = self.n_frames > 1\n\n #\n # image descriptor\n\n self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)\n\n # keep the file open\n self._fp = self.fp\n self.frame = 1\n self._min_frame = 1\n\n def seek(self, layer):\n if not self._seek_check(layer):\n return\n\n # seek to given layer (1..max)\n try:\n name, mode, bbox, tile = self.layers[layer - 1]\n self.mode = mode\n self.tile = tile\n self.frame = layer\n self.fp = self._fp\n return name, bbox\n except IndexError as e:\n raise EOFError(\"no such layer\") from e\n\n def tell(self):\n # return layer number (0=image, 1..max=layers)\n return self.frame\n\n\ndef _layerinfo(fp, ct_bytes):\n # read layerinfo block\n layers = []\n\n def read(size):\n return ImageFile._safe_read(fp, size)\n\n ct = si16(read(2))\n\n # sanity check\n if ct_bytes < (abs(ct) * 20):\n raise SyntaxError(\"Layer block too short for number of layers requested\")\n\n for _ in range(abs(ct)):\n\n # bounding box\n y0 = i32(read(4))\n x0 = i32(read(4))\n y1 = i32(read(4))\n x1 = i32(read(4))\n\n # image info\n mode = []\n ct_types = i16(read(2))\n types = list(range(ct_types))\n if len(types) > 4:\n continue\n\n for _ in types:\n type = i16(read(2))\n\n if type == 65535:\n m = \"A\"\n else:\n m = \"RGBA\"[type]\n\n mode.append(m)\n read(4) # size\n\n # figure out the image mode\n 
mode.sort()\n if mode == [\"R\"]:\n mode = \"L\"\n elif mode == [\"B\", \"G\", \"R\"]:\n mode = \"RGB\"\n elif mode == [\"A\", \"B\", \"G\", \"R\"]:\n mode = \"RGBA\"\n else:\n mode = None # unknown\n\n # skip over blend flags and extra information\n read(12) # filler\n name = \"\"\n size = i32(read(4)) # length of the extra data field\n if size:\n data_end = fp.tell() + size\n\n length = i32(read(4))\n if length:\n fp.seek(length - 16, io.SEEK_CUR)\n\n length = i32(read(4))\n if length:\n fp.seek(length, io.SEEK_CUR)\n\n length = i8(read(1))\n if length:\n # Don't know the proper encoding,\n # Latin-1 should be a good guess\n name = read(length).decode(\"latin-1\", \"replace\")\n\n fp.seek(data_end)\n layers.append((name, mode, (x0, y0, x1, y1)))\n\n # get tiles\n i = 0\n for name, mode, bbox in layers:\n tile = []\n for m in mode:\n t = _maketile(fp, m, bbox, 1)\n if t:\n tile.extend(t)\n layers[i] = name, mode, bbox, tile\n i += 1\n\n return layers\n\n\ndef _maketile(file, mode, bbox, channels):\n\n tile = None\n read = file.read\n\n compression = i16(read(2))\n\n xsize = bbox[2] - bbox[0]\n ysize = bbox[3] - bbox[1]\n\n offset = file.tell()\n\n if compression == 0:\n #\n # raw compression\n tile = []\n for channel in range(channels):\n layer = mode[channel]\n if mode == \"CMYK\":\n layer += \";I\"\n tile.append((\"raw\", bbox, offset, layer))\n offset = offset + xsize * ysize\n\n elif compression == 1:\n #\n # packbits compression\n i = 0\n tile = []\n bytecount = read(channels * ysize * 2)\n offset = file.tell()\n for channel in range(channels):\n layer = mode[channel]\n if mode == \"CMYK\":\n layer += \";I\"\n tile.append((\"packbits\", bbox, offset, layer))\n for y in range(ysize):\n offset = offset + i16(bytecount, i)\n i += 2\n\n file.seek(offset)\n\n if offset & 1:\n read(1) # padding\n\n return tile\n\n\n# --------------------------------------------------------------------\n# registry\n\n\nImage.register_open(PsdImageFile.format, PsdImageFile, _accept)\n\nImage.register_extension(PsdImageFile.format, \".psd\")\n\nImage.register_mime(PsdImageFile.format, \"image/vnd.adobe.photoshop\")\n", "path": "src/PIL/PsdImagePlugin.py"}], "after_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# Adobe PSD 2.5/3.0 file handling\n#\n# History:\n# 1995-09-01 fl Created\n# 1997-01-03 fl Read most PSD images\n# 1997-01-18 fl Fixed P and CMYK support\n# 2001-10-21 fl Added seek/tell support (for layers)\n#\n# Copyright (c) 1997-2001 by Secret Labs AB.\n# Copyright (c) 1995-2001 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nimport io\n\nfrom . 
import Image, ImageFile, ImagePalette\nfrom ._binary import i8\nfrom ._binary import i16be as i16\nfrom ._binary import i32be as i32\nfrom ._binary import si16be as si16\n\nMODES = {\n # (photoshop mode, bits) -> (pil mode, required channels)\n (0, 1): (\"1\", 1),\n (0, 8): (\"L\", 1),\n (1, 8): (\"L\", 1),\n (2, 8): (\"P\", 1),\n (3, 8): (\"RGB\", 3),\n (4, 8): (\"CMYK\", 4),\n (7, 8): (\"L\", 1), # FIXME: multilayer\n (8, 8): (\"L\", 1), # duotone\n (9, 8): (\"LAB\", 3),\n}\n\n\n# --------------------------------------------------------------------.\n# read PSD images\n\n\ndef _accept(prefix):\n return prefix[:4] == b\"8BPS\"\n\n\n##\n# Image plugin for Photoshop images.\n\n\nclass PsdImageFile(ImageFile.ImageFile):\n\n format = \"PSD\"\n format_description = \"Adobe Photoshop\"\n _close_exclusive_fp_after_loading = False\n\n def _open(self):\n\n read = self.fp.read\n\n #\n # header\n\n s = read(26)\n if not _accept(s) or i16(s, 4) != 1:\n raise SyntaxError(\"not a PSD file\")\n\n psd_bits = i16(s, 22)\n psd_channels = i16(s, 12)\n psd_mode = i16(s, 24)\n\n mode, channels = MODES[(psd_mode, psd_bits)]\n\n if channels > psd_channels:\n raise OSError(\"not enough channels\")\n if mode == \"RGB\" and psd_channels == 4:\n mode = \"RGBA\"\n channels = 4\n\n self.mode = mode\n self._size = i32(s, 18), i32(s, 14)\n\n #\n # color mode data\n\n size = i32(read(4))\n if size:\n data = read(size)\n if mode == \"P\" and size == 768:\n self.palette = ImagePalette.raw(\"RGB;L\", data)\n\n #\n # image resources\n\n self.resources = []\n\n size = i32(read(4))\n if size:\n # load resources\n end = self.fp.tell() + size\n while self.fp.tell() < end:\n read(4) # signature\n id = i16(read(2))\n name = read(i8(read(1)))\n if not (len(name) & 1):\n read(1) # padding\n data = read(i32(read(4)))\n if len(data) & 1:\n read(1) # padding\n self.resources.append((id, name, data))\n if id == 1039: # ICC profile\n self.info[\"icc_profile\"] = data\n\n #\n # layer and mask information\n\n self.layers = []\n\n size = i32(read(4))\n if size:\n end = self.fp.tell() + size\n size = i32(read(4))\n if size:\n _layer_data = io.BytesIO(ImageFile._safe_read(self.fp, size))\n self.layers = _layerinfo(_layer_data, size)\n self.fp.seek(end)\n self.n_frames = len(self.layers)\n self.is_animated = self.n_frames > 1\n\n #\n # image descriptor\n\n self.tile = _maketile(self.fp, mode, (0, 0) + self.size, channels)\n\n # keep the file open\n self._fp = self.fp\n self.frame = 1\n self._min_frame = 1\n\n def seek(self, layer):\n if not self._seek_check(layer):\n return\n\n # seek to given layer (1..max)\n try:\n name, mode, bbox, tile = self.layers[layer - 1]\n self.mode = mode\n self.tile = tile\n self.frame = layer\n self.fp = self._fp\n return name, bbox\n except IndexError as e:\n raise EOFError(\"no such layer\") from e\n\n def tell(self):\n # return layer number (0=image, 1..max=layers)\n return self.frame\n\n\ndef _layerinfo(fp, ct_bytes):\n # read layerinfo block\n layers = []\n\n def read(size):\n return ImageFile._safe_read(fp, size)\n\n ct = si16(read(2))\n\n # sanity check\n if ct_bytes < (abs(ct) * 20):\n raise SyntaxError(\"Layer block too short for number of layers requested\")\n\n for _ in range(abs(ct)):\n\n # bounding box\n y0 = i32(read(4))\n x0 = i32(read(4))\n y1 = i32(read(4))\n x1 = i32(read(4))\n\n # image info\n mode = []\n ct_types = i16(read(2))\n types = list(range(ct_types))\n if len(types) > 4:\n continue\n\n for _ in types:\n type = i16(read(2))\n\n if type == 65535:\n m = \"A\"\n else:\n m = 
\"RGBA\"[type]\n\n mode.append(m)\n read(4) # size\n\n # figure out the image mode\n mode.sort()\n if mode == [\"R\"]:\n mode = \"L\"\n elif mode == [\"B\", \"G\", \"R\"]:\n mode = \"RGB\"\n elif mode == [\"A\", \"B\", \"G\", \"R\"]:\n mode = \"RGBA\"\n else:\n mode = None # unknown\n\n # skip over blend flags and extra information\n read(12) # filler\n name = \"\"\n size = i32(read(4)) # length of the extra data field\n if size:\n data_end = fp.tell() + size\n\n length = i32(read(4))\n if length:\n fp.seek(length - 16, io.SEEK_CUR)\n\n length = i32(read(4))\n if length:\n fp.seek(length, io.SEEK_CUR)\n\n length = i8(read(1))\n if length:\n # Don't know the proper encoding,\n # Latin-1 should be a good guess\n name = read(length).decode(\"latin-1\", \"replace\")\n\n fp.seek(data_end)\n layers.append((name, mode, (x0, y0, x1, y1)))\n\n # get tiles\n i = 0\n for name, mode, bbox in layers:\n tile = []\n for m in mode:\n t = _maketile(fp, m, bbox, 1)\n if t:\n tile.extend(t)\n layers[i] = name, mode, bbox, tile\n i += 1\n\n return layers\n\n\ndef _maketile(file, mode, bbox, channels):\n\n tile = None\n read = file.read\n\n compression = i16(read(2))\n\n xsize = bbox[2] - bbox[0]\n ysize = bbox[3] - bbox[1]\n\n offset = file.tell()\n\n if compression == 0:\n #\n # raw compression\n tile = []\n for channel in range(channels):\n layer = mode[channel]\n if mode == \"CMYK\":\n layer += \";I\"\n tile.append((\"raw\", bbox, offset, layer))\n offset = offset + xsize * ysize\n\n elif compression == 1:\n #\n # packbits compression\n i = 0\n tile = []\n bytecount = read(channels * ysize * 2)\n offset = file.tell()\n for channel in range(channels):\n layer = mode[channel]\n if mode == \"CMYK\":\n layer += \";I\"\n tile.append((\"packbits\", bbox, offset, layer))\n for y in range(ysize):\n offset = offset + i16(bytecount, i)\n i += 2\n\n file.seek(offset)\n\n if offset & 1:\n read(1) # padding\n\n return tile\n\n\n# --------------------------------------------------------------------\n# registry\n\n\nImage.register_open(PsdImageFile.format, PsdImageFile, _accept)\n\nImage.register_extension(PsdImageFile.format, \".psd\")\n\nImage.register_mime(PsdImageFile.format, \"image/vnd.adobe.photoshop\")\n", "path": "src/PIL/PsdImagePlugin.py"}]} | 3,517 | 132 |
gh_patches_debug_34741 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1251 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[PORT] Add Teams specific telemetry properties
> Port this change from botbuilder-dotnet/master branch:
https://github.com/microsoft/botbuilder-dotnet/pull/4256
Add Teams-specific telemetry properties when an activity is received via the Teams channel.
See also https://github.com/microsoft/botframework-sdk/issues/5855
# Changed projects
* Microsoft.Bot.Builder
* Microsoft.Bot.Builder.Tests
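
For context, a rough sketch of the general shape such a change could take on the Python side. This is illustrative only, not the ported implementation: the function name and the telemetry property names are placeholders, and the only things assumed from the Bot Framework schema are the "msteams" channel id, `Activity.channel_data` (which for Teams carries a `tenant` object), and `ChannelAccount.aad_object_id`.

```python
from botbuilder.schema import Activity

def populate_teams_properties(activity: Activity, properties: dict) -> None:
    """Add Teams-related fields when the activity arrived over the Teams channel (sketch only)."""
    if activity.channel_id != "msteams":
        return
    channel_data = activity.channel_data if isinstance(activity.channel_data, dict) else {}
    tenant = channel_data.get("tenant") or {}
    if tenant.get("id"):
        properties["TeamsTenantId"] = tenant["id"]  # placeholder property name
    if activity.from_property and activity.from_property.aad_object_id:
        properties["TeamsUserAadObjectId"] = activity.from_property.aad_object_id  # placeholder property name
```

Presumably the actual port hooks something like this into the points where the receive/send/update/delete event properties are filled (the `fill_*_event_properties` methods in the middleware shown below).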
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3 """Middleware Component for logging Activity messages."""
4
5 from typing import Awaitable, Callable, List, Dict
6 from botbuilder.schema import Activity, ConversationReference, ActivityTypes
7 from .bot_telemetry_client import BotTelemetryClient
8 from .bot_assert import BotAssert
9 from .middleware_set import Middleware
10 from .null_telemetry_client import NullTelemetryClient
11 from .turn_context import TurnContext
12 from .telemetry_constants import TelemetryConstants
13 from .telemetry_logger_constants import TelemetryLoggerConstants
14
15
16 # pylint: disable=line-too-long
17 class TelemetryLoggerMiddleware(Middleware):
18 """Middleware for logging incoming, outgoing, updated or deleted Activity messages."""
19
20 def __init__(
21 self, telemetry_client: BotTelemetryClient, log_personal_information: bool
22 ) -> None:
23 super(TelemetryLoggerMiddleware, self).__init__()
24 self._telemetry_client = telemetry_client or NullTelemetryClient()
25 self._log_personal_information = log_personal_information
26
27 @property
28 def telemetry_client(self) -> BotTelemetryClient:
29 """Gets the currently configured BotTelemetryClient."""
30 return self._telemetry_client
31
32 @property
33 def log_personal_information(self) -> bool:
34 """ Gets a value indicating whether determines whether to log personal
35 information that came from the user."""
36 return self._log_personal_information
37
38 # pylint: disable=arguments-differ
39 async def on_turn(
40 self, context: TurnContext, logic_fn: Callable[[TurnContext], Awaitable]
41 ) -> None:
42 """Logs events based on incoming and outgoing activities using
43 BotTelemetryClient base class
44
45 :param turn_context: The context object for this turn.
46 :param logic: Callable to continue the bot middleware pipeline
47
48 :return: None
49 """
50 BotAssert.context_not_none(context)
51
52 # Log incoming activity at beginning of turn
53 if context.activity:
54 activity = context.activity
55 # Log Bot Message Received
56 await self.on_receive_activity(activity)
57
58 # hook up onSend pipeline
59 # pylint: disable=unused-argument
60 async def send_activities_handler(
61 ctx: TurnContext,
62 activities: List[Activity],
63 next_send: Callable[[], Awaitable[None]],
64 ):
65 # Run full pipeline
66 responses = await next_send()
67 for activity in activities:
68 await self.on_send_activity(activity)
69 return responses
70
71 context.on_send_activities(send_activities_handler)
72
73 # hook up update activity pipeline
74 async def update_activity_handler(
75 ctx: TurnContext, activity: Activity, next_update: Callable[[], Awaitable]
76 ):
77 # Run full pipeline
78 response = await next_update()
79 await self.on_update_activity(activity)
80 return response
81
82 context.on_update_activity(update_activity_handler)
83
84 # hook up delete activity pipeline
85 async def delete_activity_handler(
86 ctx: TurnContext,
87 reference: ConversationReference,
88 next_delete: Callable[[], Awaitable],
89 ):
90 # Run full pipeline
91 await next_delete()
92
93 delete_msg = Activity(
94 type=ActivityTypes.message_delete, id=reference.activity_id
95 )
96 deleted_activity: Activity = TurnContext.apply_conversation_reference(
97 delete_msg, reference, False
98 )
99 await self.on_delete_activity(deleted_activity)
100
101 context.on_delete_activity(delete_activity_handler)
102
103 if logic_fn:
104 await logic_fn()
105
106 async def on_receive_activity(self, activity: Activity) -> None:
107 """Invoked when a message is received from the user.
108 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
109 This event name used is "BotMessageReceived".
110 :param activity: Current activity sent from user.
111 """
112 self.telemetry_client.track_event(
113 TelemetryLoggerConstants.BOT_MSG_RECEIVE_EVENT,
114 await self.fill_receive_event_properties(activity),
115 )
116
117 async def on_send_activity(self, activity: Activity) -> None:
118 """Invoked when the bot sends a message to the user.
119 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
120 This event name used is "BotMessageSend".
121 :param activity: Current activity sent from bot.
122 """
123 self.telemetry_client.track_event(
124 TelemetryLoggerConstants.BOT_MSG_SEND_EVENT,
125 await self.fill_send_event_properties(activity),
126 )
127
128 async def on_update_activity(self, activity: Activity) -> None:
129 """Invoked when the bot updates a message.
130 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
131 This event name used is "BotMessageUpdate".
132 :param activity: Current activity sent from user.
133 """
134 self.telemetry_client.track_event(
135 TelemetryLoggerConstants.BOT_MSG_UPDATE_EVENT,
136 await self.fill_update_event_properties(activity),
137 )
138
139 async def on_delete_activity(self, activity: Activity) -> None:
140 """Invoked when the bot deletes a message.
141 Performs logging of telemetry data using the BotTelemetryClient.track_event() method.
142 This event name used is "BotMessageDelete".
143 :param activity: Current activity sent from user.
144 """
145 self.telemetry_client.track_event(
146 TelemetryLoggerConstants.BOT_MSG_DELETE_EVENT,
147 await self.fill_delete_event_properties(activity),
148 )
149
150 async def fill_receive_event_properties(
151 self, activity: Activity, additional_properties: Dict[str, str] = None
152 ) -> Dict[str, str]:
153 """Fills the event properties for the BotMessageReceived.
154 Adheres to the LogPersonalInformation flag to filter Name, Text and Speak properties.
155 :param activity: activity sent from user.
156 :param additional_properties: Additional properties to add to the event.
157 Additional properties can override "stock" properties.
158
159 :return: A dictionary that is sent as "Properties" to
160 BotTelemetryClient.track_event method for the BotMessageReceived event.
161 """
162 properties = {
163 TelemetryConstants.FROM_ID_PROPERTY: activity.from_property.id
164 if activity.from_property
165 else None,
166 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
167 TelemetryConstants.LOCALE_PROPERTY: activity.locale,
168 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
169 TelemetryConstants.RECIPIENT_NAME_PROPERTY: activity.recipient.name,
170 }
171
172 if self.log_personal_information:
173 if (
174 activity.from_property
175 and activity.from_property.name
176 and activity.from_property.name.strip()
177 ):
178 properties[
179 TelemetryConstants.FROM_NAME_PROPERTY
180 ] = activity.from_property.name
181 if activity.text and activity.text.strip():
182 properties[TelemetryConstants.TEXT_PROPERTY] = activity.text
183 if activity.speak and activity.speak.strip():
184 properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak
185
186 # Additional properties can override "stock" properties
187 if additional_properties:
188 for prop in additional_properties:
189 properties[prop.key] = prop.value
190
191 return properties
192
193 async def fill_send_event_properties(
194 self, activity: Activity, additional_properties: Dict[str, str] = None
195 ) -> Dict[str, str]:
196 """Fills the event properties for the BotMessageSend.
197 These properties are logged when an activity message is sent by the Bot to the user.
198 :param activity: activity sent from user.
199 :param additional_properties: Additional properties to add to the event.
200 Additional properties can override "stock" properties.
201
202 :return: A dictionary that is sent as "Properties" to the
203 BotTelemetryClient.track_event method for the BotMessageSend event.
204 """
205 properties = {
206 TelemetryConstants.REPLY_ACTIVITY_ID_PROPERTY: activity.reply_to_id,
207 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
208 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
209 TelemetryConstants.LOCALE_PROPERTY: activity.locale,
210 }
211
212 # Use the LogPersonalInformation flag to toggle logging PII data, text and user name are common examples
213 if self.log_personal_information:
214 if activity.attachments and activity.attachments.strip():
215 properties[
216 TelemetryConstants.ATTACHMENTS_PROPERTY
217 ] = activity.attachments
218 if activity.from_property.name and activity.from_property.name.strip():
219 properties[
220 TelemetryConstants.FROM_NAME_PROPERTY
221 ] = activity.from_property.name
222 if activity.text and activity.text.strip():
223 properties[TelemetryConstants.TEXT_PROPERTY] = activity.text
224 if activity.speak and activity.speak.strip():
225 properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak
226
227 # Additional properties can override "stock" properties
228 if additional_properties:
229 for prop in additional_properties:
230 properties[prop.key] = prop.value
231
232 return properties
233
234 async def fill_update_event_properties(
235 self, activity: Activity, additional_properties: Dict[str, str] = None
236 ) -> Dict[str, str]:
237 """Fills the event properties for the BotMessageUpdate.
238 These properties are logged when an activity message is updated by the Bot.
239 For example, if a card is interacted with by the use, and the card needs
240 to be updated to reflect some interaction.
241 :param activity: activity sent from user.
242 :param additional_properties: Additional properties to add to the event.
243 Additional properties can override "stock" properties.
244
245 :return: A dictionary that is sent as "Properties" to the
246 BotTelemetryClient.track_event method for the BotMessageUpdate event.
247 """
248 properties = {
249 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
250 TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,
251 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
252 TelemetryConstants.LOCALE_PROPERTY: activity.locale,
253 }
254
255 # Use the LogPersonalInformation flag to toggle logging PII data, text is a common examples
256 if self.log_personal_information:
257 if activity.text and activity.text.strip():
258 properties[TelemetryConstants.TEXT_PROPERTY] = activity.text
259
260 # Additional properties can override "stock" properties
261 if additional_properties:
262 for prop in additional_properties:
263 properties[prop.key] = prop.value
264
265 return properties
266
267 async def fill_delete_event_properties(
268 self, activity: Activity, additional_properties: Dict[str, str] = None
269 ) -> Dict[str, str]:
270 """Fills the event properties for the BotMessageDelete.
271 These properties are logged when an activity message is deleted by the Bot.
272 :param activity: activity sent from user.
273 :param additional_properties: Additional properties to add to the event.
274 Additional properties can override "stock" properties.
275
276 :return: A dictionary that is sent as "Properties" to the
277 BotTelemetryClient.track_event method for the BotMessageUpdate event.
278 """
279 properties = {
280 TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,
281 TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,
282 TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,
283 }
284
285 # Additional properties can override "stock" properties
286 if additional_properties:
287 for prop in additional_properties:
288 properties[prop.key] = prop.value
289
290 return properties
291
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py
--- a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py
+++ b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py
@@ -1,9 +1,11 @@
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Middleware Component for logging Activity messages."""
-
from typing import Awaitable, Callable, List, Dict
from botbuilder.schema import Activity, ConversationReference, ActivityTypes
+from botbuilder.schema.teams import TeamsChannelData, TeamInfo
+from botframework.connector import Channels
+
from .bot_telemetry_client import BotTelemetryClient
from .bot_assert import BotAssert
from .middleware_set import Middleware
@@ -183,6 +185,10 @@
if activity.speak and activity.speak.strip():
properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak
+ TelemetryLoggerMiddleware.__populate_additional_channel_properties(
+ activity, properties
+ )
+
# Additional properties can override "stock" properties
if additional_properties:
for prop in additional_properties:
@@ -288,3 +294,25 @@
properties[prop.key] = prop.value
return properties
+
+ @staticmethod
+ def __populate_additional_channel_properties(
+ activity: Activity, properties: dict,
+ ):
+ if activity.channel_id == Channels.ms_teams:
+ teams_channel_data: TeamsChannelData = activity.channel_data
+
+ properties["TeamsTenantId"] = (
+ teams_channel_data.tenant
+ if teams_channel_data and teams_channel_data.tenant
+ else ""
+ )
+
+ properties["TeamsUserAadObjectId"] = (
+ activity.from_property.aad_object_id if activity.from_property else ""
+ )
+
+ if teams_channel_data and teams_channel_data.team:
+ properties["TeamsTeamInfo"] = TeamInfo.serialize(
+ teams_channel_data.team
+ )
| {"golden_diff": "diff --git a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py\n--- a/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py\n+++ b/libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py\n@@ -1,9 +1,11 @@\n # Copyright (c) Microsoft Corporation. All rights reserved.\n # Licensed under the MIT License.\n \"\"\"Middleware Component for logging Activity messages.\"\"\"\n-\n from typing import Awaitable, Callable, List, Dict\n from botbuilder.schema import Activity, ConversationReference, ActivityTypes\n+from botbuilder.schema.teams import TeamsChannelData, TeamInfo\n+from botframework.connector import Channels\n+\n from .bot_telemetry_client import BotTelemetryClient\n from .bot_assert import BotAssert\n from .middleware_set import Middleware\n@@ -183,6 +185,10 @@\n if activity.speak and activity.speak.strip():\n properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak\n \n+ TelemetryLoggerMiddleware.__populate_additional_channel_properties(\n+ activity, properties\n+ )\n+\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n@@ -288,3 +294,25 @@\n properties[prop.key] = prop.value\n \n return properties\n+\n+ @staticmethod\n+ def __populate_additional_channel_properties(\n+ activity: Activity, properties: dict,\n+ ):\n+ if activity.channel_id == Channels.ms_teams:\n+ teams_channel_data: TeamsChannelData = activity.channel_data\n+\n+ properties[\"TeamsTenantId\"] = (\n+ teams_channel_data.tenant\n+ if teams_channel_data and teams_channel_data.tenant\n+ else \"\"\n+ )\n+\n+ properties[\"TeamsUserAadObjectId\"] = (\n+ activity.from_property.aad_object_id if activity.from_property else \"\"\n+ )\n+\n+ if teams_channel_data and teams_channel_data.team:\n+ properties[\"TeamsTeamInfo\"] = TeamInfo.serialize(\n+ teams_channel_data.team\n+ )\n", "issue": "[PORT] Add Teams specific telemetry properties \n> Port this change from botbuilder-dotnet/master branch:\r\nhttps://github.com/microsoft/botbuilder-dotnet/pull/4256\r\n\r\nAdd Teams specific telemetry properties when activity received via the Teams channel.\r\n\r\nSee also https://github.com/microsoft/botframework-sdk/issues/5855\r\n\r\n\r\n\r\n# Changed projects\r\n* Microsoft.Bot.Builder\r\n* Microsoft.Bot.Builder.Tests\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\"\"\"Middleware Component for logging Activity messages.\"\"\"\n\nfrom typing import Awaitable, Callable, List, Dict\nfrom botbuilder.schema import Activity, ConversationReference, ActivityTypes\nfrom .bot_telemetry_client import BotTelemetryClient\nfrom .bot_assert import BotAssert\nfrom .middleware_set import Middleware\nfrom .null_telemetry_client import NullTelemetryClient\nfrom .turn_context import TurnContext\nfrom .telemetry_constants import TelemetryConstants\nfrom .telemetry_logger_constants import TelemetryLoggerConstants\n\n\n# pylint: disable=line-too-long\nclass TelemetryLoggerMiddleware(Middleware):\n \"\"\"Middleware for logging incoming, outgoing, updated or deleted Activity messages.\"\"\"\n\n def __init__(\n self, telemetry_client: BotTelemetryClient, log_personal_information: bool\n ) -> None:\n super(TelemetryLoggerMiddleware, self).__init__()\n self._telemetry_client = telemetry_client or NullTelemetryClient()\n self._log_personal_information = log_personal_information\n\n @property\n def telemetry_client(self) -> BotTelemetryClient:\n \"\"\"Gets the currently configured BotTelemetryClient.\"\"\"\n return self._telemetry_client\n\n @property\n def log_personal_information(self) -> bool:\n \"\"\" Gets a value indicating whether determines whether to log personal\n information that came from the user.\"\"\"\n return self._log_personal_information\n\n # pylint: disable=arguments-differ\n async def on_turn(\n self, context: TurnContext, logic_fn: Callable[[TurnContext], Awaitable]\n ) -> None:\n \"\"\"Logs events based on incoming and outgoing activities using\n BotTelemetryClient base class\n\n :param turn_context: The context object for this turn.\n :param logic: Callable to continue the bot middleware pipeline\n\n :return: None\n \"\"\"\n BotAssert.context_not_none(context)\n\n # Log incoming activity at beginning of turn\n if context.activity:\n activity = context.activity\n # Log Bot Message Received\n await self.on_receive_activity(activity)\n\n # hook up onSend pipeline\n # pylint: disable=unused-argument\n async def send_activities_handler(\n ctx: TurnContext,\n activities: List[Activity],\n next_send: Callable[[], Awaitable[None]],\n ):\n # Run full pipeline\n responses = await next_send()\n for activity in activities:\n await self.on_send_activity(activity)\n return responses\n\n context.on_send_activities(send_activities_handler)\n\n # hook up update activity pipeline\n async def update_activity_handler(\n ctx: TurnContext, activity: Activity, next_update: Callable[[], Awaitable]\n ):\n # Run full pipeline\n response = await next_update()\n await self.on_update_activity(activity)\n return response\n\n context.on_update_activity(update_activity_handler)\n\n # hook up delete activity pipeline\n async def delete_activity_handler(\n ctx: TurnContext,\n reference: ConversationReference,\n next_delete: Callable[[], Awaitable],\n ):\n # Run full pipeline\n await next_delete()\n\n delete_msg = Activity(\n type=ActivityTypes.message_delete, id=reference.activity_id\n )\n deleted_activity: Activity = TurnContext.apply_conversation_reference(\n delete_msg, reference, False\n )\n await self.on_delete_activity(deleted_activity)\n\n context.on_delete_activity(delete_activity_handler)\n\n if logic_fn:\n await logic_fn()\n\n async def on_receive_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when a message is received from the user.\n Performs logging of telemetry data using the 
BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageReceived\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_RECEIVE_EVENT,\n await self.fill_receive_event_properties(activity),\n )\n\n async def on_send_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot sends a message to the user.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageSend\".\n :param activity: Current activity sent from bot.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_SEND_EVENT,\n await self.fill_send_event_properties(activity),\n )\n\n async def on_update_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot updates a message.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageUpdate\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_UPDATE_EVENT,\n await self.fill_update_event_properties(activity),\n )\n\n async def on_delete_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot deletes a message.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageDelete\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_DELETE_EVENT,\n await self.fill_delete_event_properties(activity),\n )\n\n async def fill_receive_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageReceived.\n Adheres to the LogPersonalInformation flag to filter Name, Text and Speak properties.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to\n BotTelemetryClient.track_event method for the BotMessageReceived event.\n \"\"\"\n properties = {\n TelemetryConstants.FROM_ID_PROPERTY: activity.from_property.id\n if activity.from_property\n else None,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.RECIPIENT_NAME_PROPERTY: activity.recipient.name,\n }\n\n if self.log_personal_information:\n if (\n activity.from_property\n and activity.from_property.name\n and activity.from_property.name.strip()\n ):\n properties[\n TelemetryConstants.FROM_NAME_PROPERTY\n ] = activity.from_property.name\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n if activity.speak and activity.speak.strip():\n properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_send_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageSend.\n These properties are logged when 
an activity message is sent by the Bot to the user.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageSend event.\n \"\"\"\n properties = {\n TelemetryConstants.REPLY_ACTIVITY_ID_PROPERTY: activity.reply_to_id,\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n }\n\n # Use the LogPersonalInformation flag to toggle logging PII data, text and user name are common examples\n if self.log_personal_information:\n if activity.attachments and activity.attachments.strip():\n properties[\n TelemetryConstants.ATTACHMENTS_PROPERTY\n ] = activity.attachments\n if activity.from_property.name and activity.from_property.name.strip():\n properties[\n TelemetryConstants.FROM_NAME_PROPERTY\n ] = activity.from_property.name\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n if activity.speak and activity.speak.strip():\n properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_update_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageUpdate.\n These properties are logged when an activity message is updated by the Bot.\n For example, if a card is interacted with by the use, and the card needs\n to be updated to reflect some interaction.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageUpdate event.\n \"\"\"\n properties = {\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n }\n\n # Use the LogPersonalInformation flag to toggle logging PII data, text is a common examples\n if self.log_personal_information:\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_delete_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageDelete.\n These properties are logged when an activity message is deleted by the Bot.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageUpdate 
event.\n \"\"\"\n properties = {\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n }\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n", "path": "libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\"\"\"Middleware Component for logging Activity messages.\"\"\"\nfrom typing import Awaitable, Callable, List, Dict\nfrom botbuilder.schema import Activity, ConversationReference, ActivityTypes\nfrom botbuilder.schema.teams import TeamsChannelData, TeamInfo\nfrom botframework.connector import Channels\n\nfrom .bot_telemetry_client import BotTelemetryClient\nfrom .bot_assert import BotAssert\nfrom .middleware_set import Middleware\nfrom .null_telemetry_client import NullTelemetryClient\nfrom .turn_context import TurnContext\nfrom .telemetry_constants import TelemetryConstants\nfrom .telemetry_logger_constants import TelemetryLoggerConstants\n\n\n# pylint: disable=line-too-long\nclass TelemetryLoggerMiddleware(Middleware):\n \"\"\"Middleware for logging incoming, outgoing, updated or deleted Activity messages.\"\"\"\n\n def __init__(\n self, telemetry_client: BotTelemetryClient, log_personal_information: bool\n ) -> None:\n super(TelemetryLoggerMiddleware, self).__init__()\n self._telemetry_client = telemetry_client or NullTelemetryClient()\n self._log_personal_information = log_personal_information\n\n @property\n def telemetry_client(self) -> BotTelemetryClient:\n \"\"\"Gets the currently configured BotTelemetryClient.\"\"\"\n return self._telemetry_client\n\n @property\n def log_personal_information(self) -> bool:\n \"\"\" Gets a value indicating whether determines whether to log personal\n information that came from the user.\"\"\"\n return self._log_personal_information\n\n # pylint: disable=arguments-differ\n async def on_turn(\n self, context: TurnContext, logic_fn: Callable[[TurnContext], Awaitable]\n ) -> None:\n \"\"\"Logs events based on incoming and outgoing activities using\n BotTelemetryClient base class\n\n :param turn_context: The context object for this turn.\n :param logic: Callable to continue the bot middleware pipeline\n\n :return: None\n \"\"\"\n BotAssert.context_not_none(context)\n\n # Log incoming activity at beginning of turn\n if context.activity:\n activity = context.activity\n # Log Bot Message Received\n await self.on_receive_activity(activity)\n\n # hook up onSend pipeline\n # pylint: disable=unused-argument\n async def send_activities_handler(\n ctx: TurnContext,\n activities: List[Activity],\n next_send: Callable[[], Awaitable[None]],\n ):\n # Run full pipeline\n responses = await next_send()\n for activity in activities:\n await self.on_send_activity(activity)\n return responses\n\n context.on_send_activities(send_activities_handler)\n\n # hook up update activity pipeline\n async def update_activity_handler(\n ctx: TurnContext, activity: Activity, next_update: Callable[[], Awaitable]\n ):\n # Run full pipeline\n response = await next_update()\n await self.on_update_activity(activity)\n return response\n\n context.on_update_activity(update_activity_handler)\n\n # hook up delete activity pipeline\n async def 
delete_activity_handler(\n ctx: TurnContext,\n reference: ConversationReference,\n next_delete: Callable[[], Awaitable],\n ):\n # Run full pipeline\n await next_delete()\n\n delete_msg = Activity(\n type=ActivityTypes.message_delete, id=reference.activity_id\n )\n deleted_activity: Activity = TurnContext.apply_conversation_reference(\n delete_msg, reference, False\n )\n await self.on_delete_activity(deleted_activity)\n\n context.on_delete_activity(delete_activity_handler)\n\n if logic_fn:\n await logic_fn()\n\n async def on_receive_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when a message is received from the user.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageReceived\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_RECEIVE_EVENT,\n await self.fill_receive_event_properties(activity),\n )\n\n async def on_send_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot sends a message to the user.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageSend\".\n :param activity: Current activity sent from bot.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_SEND_EVENT,\n await self.fill_send_event_properties(activity),\n )\n\n async def on_update_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot updates a message.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageUpdate\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_UPDATE_EVENT,\n await self.fill_update_event_properties(activity),\n )\n\n async def on_delete_activity(self, activity: Activity) -> None:\n \"\"\"Invoked when the bot deletes a message.\n Performs logging of telemetry data using the BotTelemetryClient.track_event() method.\n This event name used is \"BotMessageDelete\".\n :param activity: Current activity sent from user.\n \"\"\"\n self.telemetry_client.track_event(\n TelemetryLoggerConstants.BOT_MSG_DELETE_EVENT,\n await self.fill_delete_event_properties(activity),\n )\n\n async def fill_receive_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageReceived.\n Adheres to the LogPersonalInformation flag to filter Name, Text and Speak properties.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to\n BotTelemetryClient.track_event method for the BotMessageReceived event.\n \"\"\"\n properties = {\n TelemetryConstants.FROM_ID_PROPERTY: activity.from_property.id\n if activity.from_property\n else None,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.RECIPIENT_NAME_PROPERTY: activity.recipient.name,\n }\n\n if self.log_personal_information:\n if (\n activity.from_property\n and activity.from_property.name\n and activity.from_property.name.strip()\n ):\n properties[\n 
TelemetryConstants.FROM_NAME_PROPERTY\n ] = activity.from_property.name\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n if activity.speak and activity.speak.strip():\n properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak\n\n TelemetryLoggerMiddleware.__populate_additional_channel_properties(\n activity, properties\n )\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_send_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageSend.\n These properties are logged when an activity message is sent by the Bot to the user.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageSend event.\n \"\"\"\n properties = {\n TelemetryConstants.REPLY_ACTIVITY_ID_PROPERTY: activity.reply_to_id,\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n }\n\n # Use the LogPersonalInformation flag to toggle logging PII data, text and user name are common examples\n if self.log_personal_information:\n if activity.attachments and activity.attachments.strip():\n properties[\n TelemetryConstants.ATTACHMENTS_PROPERTY\n ] = activity.attachments\n if activity.from_property.name and activity.from_property.name.strip():\n properties[\n TelemetryConstants.FROM_NAME_PROPERTY\n ] = activity.from_property.name\n if activity.text and activity.text.strip():\n properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n if activity.speak and activity.speak.strip():\n properties[TelemetryConstants.SPEAK_PROPERTY] = activity.speak\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_update_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageUpdate.\n These properties are logged when an activity message is updated by the Bot.\n For example, if a card is interacted with by the use, and the card needs\n to be updated to reflect some interaction.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageUpdate event.\n \"\"\"\n properties = {\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n TelemetryConstants.LOCALE_PROPERTY: activity.locale,\n }\n\n # Use the LogPersonalInformation flag to toggle logging PII data, text is a common examples\n if self.log_personal_information:\n if activity.text and activity.text.strip():\n 
properties[TelemetryConstants.TEXT_PROPERTY] = activity.text\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n async def fill_delete_event_properties(\n self, activity: Activity, additional_properties: Dict[str, str] = None\n ) -> Dict[str, str]:\n \"\"\"Fills the event properties for the BotMessageDelete.\n These properties are logged when an activity message is deleted by the Bot.\n :param activity: activity sent from user.\n :param additional_properties: Additional properties to add to the event.\n Additional properties can override \"stock\" properties.\n\n :return: A dictionary that is sent as \"Properties\" to the\n BotTelemetryClient.track_event method for the BotMessageUpdate event.\n \"\"\"\n properties = {\n TelemetryConstants.RECIPIENT_ID_PROPERTY: activity.recipient.id,\n TelemetryConstants.CONVERSATION_ID_PROPERTY: activity.conversation.id,\n TelemetryConstants.CONVERSATION_NAME_PROPERTY: activity.conversation.name,\n }\n\n # Additional properties can override \"stock\" properties\n if additional_properties:\n for prop in additional_properties:\n properties[prop.key] = prop.value\n\n return properties\n\n @staticmethod\n def __populate_additional_channel_properties(\n activity: Activity, properties: dict,\n ):\n if activity.channel_id == Channels.ms_teams:\n teams_channel_data: TeamsChannelData = activity.channel_data\n\n properties[\"TeamsTenantId\"] = (\n teams_channel_data.tenant\n if teams_channel_data and teams_channel_data.tenant\n else \"\"\n )\n\n properties[\"TeamsUserAadObjectId\"] = (\n activity.from_property.aad_object_id if activity.from_property else \"\"\n )\n\n if teams_channel_data and teams_channel_data.team:\n properties[\"TeamsTeamInfo\"] = TeamInfo.serialize(\n teams_channel_data.team\n )\n", "path": "libraries/botbuilder-core/botbuilder/core/telemetry_logger_middleware.py"}]} | 3,539 | 470 |
gh_patches_debug_7326 | rasdani/github-patches | git_diff | microsoft__ptvsd-619 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Launching a Python module using `-m` with `ptvsd.enable_attach` in __init__ fails
## Environment data
- PTVSD version: Master
- OS and version: Mac (may not be OS specific)
- Python version (& distribution if applicable, e.g. Anaconda): 3.6
- Using VS Code or Visual Studio:N/A
## Actual behavior
```
Traceback (most recent call last):
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/tests/resources/system_tests/test_exceptions/mymod_attach1/__init__.py", line 4, in <module>
ptvsd.enable_attach((sys.argv[1], sys.argv[2]))
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/attach_server.py", line 71, in enable_attach
redirect_output=redirect_output,
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/_remote.py", line 27, in enable_attach
**kwargs
File "/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/pydevd_hooks.py", line 95, in install
if __main__ is not pydevd and __main__.__file__ == pydevd.__file__:
AttributeError: module '__main__' has no attribute '__file__'
```
## Expected behavior
Should not error out
## Steps to reproduce:
* Create a python module
* Add the following code in the `__init__.py`
```python
import sys
import ptvsd
ptvsd.enable_attach(('localhost', 9876))
ptvsd.wait_for_attach()
try:
raise ArithmeticError('Hello')
except Exception:
pass
sys.stdout.write('end')
```
* Start the module `python -m xyz`
That's when it goes kaboom
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ptvsd/pydevd_hooks.py`
Content:
```
1 import sys
2
3 from _pydevd_bundle import pydevd_comm
4
5 from ptvsd.socket import Address
6 from ptvsd.daemon import Daemon, DaemonStoppedError, DaemonClosedError
7 from ptvsd._util import debug, new_hidden_thread
8
9
10 def start_server(daemon, host, port, **kwargs):
11 """Return a socket to a (new) local pydevd-handling daemon.
12
13 The daemon supports the pydevd client wire protocol, sending
14 requests and handling responses (and events).
15
16 This is a replacement for _pydevd_bundle.pydevd_comm.start_server.
17 """
18 sock, next_session = daemon.start_server((host, port))
19
20 def handle_next():
21 try:
22 session = next_session(**kwargs)
23 debug('done waiting')
24 return session
25 except (DaemonClosedError, DaemonStoppedError):
26 # Typically won't happen.
27 debug('stopped')
28 raise
29 except Exception as exc:
30 # TODO: log this?
31 debug('failed:', exc, tb=True)
32 return None
33
34 while True:
35 debug('waiting on initial connection')
36 handle_next()
37 break
38
39 def serve_forever():
40 while True:
41 debug('waiting on next connection')
42 try:
43 handle_next()
44 except (DaemonClosedError, DaemonStoppedError):
45 break
46 debug('done')
47
48 t = new_hidden_thread(
49 target=serve_forever,
50 name='sessions',
51 )
52 t.start()
53 return sock
54
55
56 def start_client(daemon, host, port, **kwargs):
57 """Return a socket to an existing "remote" pydevd-handling daemon.
58
59 The daemon supports the pydevd client wire protocol, sending
60 requests and handling responses (and events).
61
62 This is a replacement for _pydevd_bundle.pydevd_comm.start_client.
63 """
64 sock, start_session = daemon.start_client((host, port))
65 start_session(**kwargs)
66 return sock
67
68
69 def install(pydevd, address,
70 start_server=start_server, start_client=start_client,
71 **kwargs):
72 """Configure pydevd to use our wrapper.
73
74 This is a bit of a hack to allow us to run our VSC debug adapter
75 in the same process as pydevd. Note that, as with most hacks,
76 this is somewhat fragile (since the monkeypatching sites may
77 change).
78 """
79 addr = Address.from_raw(address)
80 daemon = Daemon(**kwargs)
81
82 _start_server = (lambda p: start_server(daemon, addr.host, p))
83 _start_server.orig = start_server
84 _start_client = (lambda h, p: start_client(daemon, h, p))
85 _start_client.orig = start_client
86
87 # These are the functions pydevd invokes to get a socket to the client.
88 pydevd_comm.start_server = _start_server
89 pydevd_comm.start_client = _start_client
90
91 # Ensure that pydevd is using our functions.
92 pydevd.start_server = _start_server
93 pydevd.start_client = _start_client
94 __main__ = sys.modules['__main__']
95 if __main__ is not pydevd and __main__.__file__ == pydevd.__file__:
96 __main__.start_server = _start_server
97 __main__.start_client = _start_client
98 return daemon
99
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ptvsd/pydevd_hooks.py b/ptvsd/pydevd_hooks.py
--- a/ptvsd/pydevd_hooks.py
+++ b/ptvsd/pydevd_hooks.py
@@ -92,7 +92,8 @@
pydevd.start_server = _start_server
pydevd.start_client = _start_client
__main__ = sys.modules['__main__']
- if __main__ is not pydevd and __main__.__file__ == pydevd.__file__:
- __main__.start_server = _start_server
- __main__.start_client = _start_client
+ if __main__ is not pydevd:
+ if getattr(__main__, '__file__', None) == pydevd.__file__:
+ __main__.start_server = _start_server
+ __main__.start_client = _start_client
return daemon
| {"golden_diff": "diff --git a/ptvsd/pydevd_hooks.py b/ptvsd/pydevd_hooks.py\n--- a/ptvsd/pydevd_hooks.py\n+++ b/ptvsd/pydevd_hooks.py\n@@ -92,7 +92,8 @@\n pydevd.start_server = _start_server\n pydevd.start_client = _start_client\n __main__ = sys.modules['__main__']\n- if __main__ is not pydevd and __main__.__file__ == pydevd.__file__:\n- __main__.start_server = _start_server\n- __main__.start_client = _start_client\n+ if __main__ is not pydevd:\n+ if getattr(__main__, '__file__', None) == pydevd.__file__:\n+ __main__.start_server = _start_server\n+ __main__.start_client = _start_client\n return daemon\n", "issue": "Launching a Python module using `-m` with `ptvsd.enable_attach` in __init__ fails\n## Environment data\r\n\r\n- PTVSD version: Master\r\n- OS and version: Mac (may not be OS specific)\r\n- Python version (& distribution if applicable, e.g. Anaconda): 3.6\r\n- Using VS Code or Visual Studio:N/A\r\n\r\n## Actual behavior\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/tests/resources/system_tests/test_exceptions/mymod_attach1/__init__.py\", line 4, in <module>\r\n ptvsd.enable_attach((sys.argv[1], sys.argv[2]))\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/attach_server.py\", line 71, in enable_attach\r\n redirect_output=redirect_output,\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/_remote.py\", line 27, in enable_attach\r\n **kwargs\r\n File \"/Users/donjayamanne/Desktop/Development/vscode/ptvsd/ptvsd/pydevd_hooks.py\", line 95, in install\r\n if __main__ is not pydevd and __main__.__file__ == pydevd.__file__:\r\nAttributeError: module '__main__' has no attribute '__file__'\r\n```\r\n\r\n## Expected behavior\r\n\r\nShould not error out\r\n\r\n## Steps to reproduce:\r\n\r\n* Create a python module\r\n* Add the following code in the `__init__.py`\r\n```python\r\nimport sys\r\nimport ptvsd\r\nptvsd.enable_attach(('localhost', 9876))\r\nptvsd.wait_for_attach()\r\n\r\ntry:\r\n raise ArithmeticError('Hello')\r\nexcept Exception:\r\n pass\r\nsys.stdout.write('end')\r\n```\r\n* Start the module `python -m xyz`\r\nThat's when it goes kaboom\n", "before_files": [{"content": "import sys\n\nfrom _pydevd_bundle import pydevd_comm\n\nfrom ptvsd.socket import Address\nfrom ptvsd.daemon import Daemon, DaemonStoppedError, DaemonClosedError\nfrom ptvsd._util import debug, new_hidden_thread\n\n\ndef start_server(daemon, host, port, **kwargs):\n \"\"\"Return a socket to a (new) local pydevd-handling daemon.\n\n The daemon supports the pydevd client wire protocol, sending\n requests and handling responses (and events).\n\n This is a replacement for _pydevd_bundle.pydevd_comm.start_server.\n \"\"\"\n sock, next_session = daemon.start_server((host, port))\n\n def handle_next():\n try:\n session = next_session(**kwargs)\n debug('done waiting')\n return session\n except (DaemonClosedError, DaemonStoppedError):\n # Typically won't happen.\n debug('stopped')\n raise\n except Exception as exc:\n # TODO: log this?\n debug('failed:', exc, tb=True)\n return None\n\n while True:\n debug('waiting on initial connection')\n handle_next()\n break\n\n def serve_forever():\n while True:\n debug('waiting on next connection')\n try:\n handle_next()\n except (DaemonClosedError, DaemonStoppedError):\n break\n debug('done')\n\n t = new_hidden_thread(\n target=serve_forever,\n name='sessions',\n )\n t.start()\n return sock\n\n\ndef start_client(daemon, host, port, **kwargs):\n \"\"\"Return a socket to an 
existing \"remote\" pydevd-handling daemon.\n\n The daemon supports the pydevd client wire protocol, sending\n requests and handling responses (and events).\n\n This is a replacement for _pydevd_bundle.pydevd_comm.start_client.\n \"\"\"\n sock, start_session = daemon.start_client((host, port))\n start_session(**kwargs)\n return sock\n\n\ndef install(pydevd, address,\n start_server=start_server, start_client=start_client,\n **kwargs):\n \"\"\"Configure pydevd to use our wrapper.\n\n This is a bit of a hack to allow us to run our VSC debug adapter\n in the same process as pydevd. Note that, as with most hacks,\n this is somewhat fragile (since the monkeypatching sites may\n change).\n \"\"\"\n addr = Address.from_raw(address)\n daemon = Daemon(**kwargs)\n\n _start_server = (lambda p: start_server(daemon, addr.host, p))\n _start_server.orig = start_server\n _start_client = (lambda h, p: start_client(daemon, h, p))\n _start_client.orig = start_client\n\n # These are the functions pydevd invokes to get a socket to the client.\n pydevd_comm.start_server = _start_server\n pydevd_comm.start_client = _start_client\n\n # Ensure that pydevd is using our functions.\n pydevd.start_server = _start_server\n pydevd.start_client = _start_client\n __main__ = sys.modules['__main__']\n if __main__ is not pydevd and __main__.__file__ == pydevd.__file__:\n __main__.start_server = _start_server\n __main__.start_client = _start_client\n return daemon\n", "path": "ptvsd/pydevd_hooks.py"}], "after_files": [{"content": "import sys\n\nfrom _pydevd_bundle import pydevd_comm\n\nfrom ptvsd.socket import Address\nfrom ptvsd.daemon import Daemon, DaemonStoppedError, DaemonClosedError\nfrom ptvsd._util import debug, new_hidden_thread\n\n\ndef start_server(daemon, host, port, **kwargs):\n \"\"\"Return a socket to a (new) local pydevd-handling daemon.\n\n The daemon supports the pydevd client wire protocol, sending\n requests and handling responses (and events).\n\n This is a replacement for _pydevd_bundle.pydevd_comm.start_server.\n \"\"\"\n sock, next_session = daemon.start_server((host, port))\n\n def handle_next():\n try:\n session = next_session(**kwargs)\n debug('done waiting')\n return session\n except (DaemonClosedError, DaemonStoppedError):\n # Typically won't happen.\n debug('stopped')\n raise\n except Exception as exc:\n # TODO: log this?\n debug('failed:', exc, tb=True)\n return None\n\n while True:\n debug('waiting on initial connection')\n handle_next()\n break\n\n def serve_forever():\n while True:\n debug('waiting on next connection')\n try:\n handle_next()\n except (DaemonClosedError, DaemonStoppedError):\n break\n debug('done')\n\n t = new_hidden_thread(\n target=serve_forever,\n name='sessions',\n )\n t.start()\n return sock\n\n\ndef start_client(daemon, host, port, **kwargs):\n \"\"\"Return a socket to an existing \"remote\" pydevd-handling daemon.\n\n The daemon supports the pydevd client wire protocol, sending\n requests and handling responses (and events).\n\n This is a replacement for _pydevd_bundle.pydevd_comm.start_client.\n \"\"\"\n sock, start_session = daemon.start_client((host, port))\n start_session(**kwargs)\n return sock\n\n\ndef install(pydevd, address,\n start_server=start_server, start_client=start_client,\n **kwargs):\n \"\"\"Configure pydevd to use our wrapper.\n\n This is a bit of a hack to allow us to run our VSC debug adapter\n in the same process as pydevd. 
Note that, as with most hacks,\n this is somewhat fragile (since the monkeypatching sites may\n change).\n \"\"\"\n addr = Address.from_raw(address)\n daemon = Daemon(**kwargs)\n\n _start_server = (lambda p: start_server(daemon, addr.host, p))\n _start_server.orig = start_server\n _start_client = (lambda h, p: start_client(daemon, h, p))\n _start_client.orig = start_client\n\n # These are the functions pydevd invokes to get a socket to the client.\n pydevd_comm.start_server = _start_server\n pydevd_comm.start_client = _start_client\n\n # Ensure that pydevd is using our functions.\n pydevd.start_server = _start_server\n pydevd.start_client = _start_client\n __main__ = sys.modules['__main__']\n if __main__ is not pydevd:\n if getattr(__main__, '__file__', None) == pydevd.__file__:\n __main__.start_server = _start_server\n __main__.start_client = _start_client\n return daemon\n", "path": "ptvsd/pydevd_hooks.py"}]} | 1,600 | 202 |
gh_patches_debug_26783 | rasdani/github-patches | git_diff | bridgecrewio__checkov-748 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
False positive for check 'MSKClusterEncryption' (CKV_AWS_81)
**Describe the bug**
CKV_AWS_81 is reporting MSK clusters as unencrypted at rest while they are encrypted at rest.
**To Reproduce**
Example Terraform code producing a cluster with encryption (it will use the default KMS key as documented in https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#encryption_at_rest_kms_key_arn)
(slightly adapted from example on the msk_cluster resource docs)
<details>
<summary>Code</summary>
```
resource "aws_vpc" "vpc" {
cidr_block = "192.168.0.0/22"
}
data "aws_availability_zones" "azs" {
state = "available"
}
resource "aws_subnet" "subnet_az1" {
availability_zone = data.aws_availability_zones.azs.names[0]
cidr_block = "192.168.0.0/24"
vpc_id = aws_vpc.vpc.id
}
resource "aws_subnet" "subnet_az2" {
availability_zone = data.aws_availability_zones.azs.names[1]
cidr_block = "192.168.1.0/24"
vpc_id = aws_vpc.vpc.id
}
resource "aws_subnet" "subnet_az3" {
availability_zone = data.aws_availability_zones.azs.names[2]
cidr_block = "192.168.2.0/24"
vpc_id = aws_vpc.vpc.id
}
resource "aws_security_group" "sg" {
vpc_id = aws_vpc.vpc.id
}
resource "aws_kms_key" "kms" {
description = "example"
}
resource "aws_cloudwatch_log_group" "test" {
name = "msk_broker_logs"
}
resource "aws_s3_bucket" "bucket" {
bucket = "msk-broker-logs-bucket"
acl = "private"
}
resource "aws_iam_role" "firehose_role" {
name = "firehose_test_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Action": "sts:AssumeRole",
"Principal": {
"Service": "firehose.amazonaws.com"
},
"Effect": "Allow",
"Sid": ""
}
]
}
EOF
}
resource "aws_kinesis_firehose_delivery_stream" "test_stream" {
name = "terraform-kinesis-firehose-msk-broker-logs-stream"
destination = "s3"
s3_configuration {
role_arn = aws_iam_role.firehose_role.arn
bucket_arn = aws_s3_bucket.bucket.arn
}
tags = {
LogDeliveryEnabled = "placeholder"
}
lifecycle {
ignore_changes = [
tags["LogDeliveryEnabled"],
]
}
}
resource "aws_msk_cluster" "example" {
cluster_name = "example"
kafka_version = "2.4.1"
number_of_broker_nodes = 3
broker_node_group_info {
instance_type = "kafka.m5.large"
ebs_volume_size = 1000
client_subnets = [
aws_subnet.subnet_az1.id,
aws_subnet.subnet_az2.id,
aws_subnet.subnet_az3.id,
]
security_groups = [aws_security_group.sg.id]
}
encryption_info {
encryption_in_transit {
client_broker = "TLS"
in_cluster = true
}
}
open_monitoring {
prometheus {
jmx_exporter {
enabled_in_broker = true
}
node_exporter {
enabled_in_broker = true
}
}
}
logging_info {
broker_logs {
cloudwatch_logs {
enabled = true
log_group = aws_cloudwatch_log_group.test.name
}
firehose {
enabled = true
delivery_stream = aws_kinesis_firehose_delivery_stream.test_stream.name
}
s3 {
enabled = true
bucket = aws_s3_bucket.bucket.id
prefix = "logs/msk-"
}
}
}
tags = {
foo = "bar"
}
}
output "zookeeper_connect_string" {
value = aws_msk_cluster.example.zookeeper_connect_string
}
output "bootstrap_brokers_tls" {
description = "TLS connection host:port pairs"
value = aws_msk_cluster.example.bootstrap_brokers_tls
}
```
</details>
**Expected behavior**
No error is thrown as long as an encryption_info block is defined
Checkov Version 1.0.669
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/terraform/checks/resource/aws/MSKClusterEncryption.py`
Content:
```
1 from checkov.common.models.enums import CheckResult, CheckCategories
2 from checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck
3
4
5 class MSKClusterEncryption(BaseResourceCheck):
6 def __init__(self):
7 name = "Ensure MSK Cluster encryption in rest and transit is enabled"
8 id = "CKV_AWS_81"
9 supported_resources = ['aws_msk_cluster']
10 categories = [CheckCategories.ENCRYPTION]
11 super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
12
13 def scan_resource_conf(self, conf):
14 if 'encryption_info' in conf.keys():
15 encryption = conf['encryption_info'][0]
16 if 'encryption_at_rest_kms_key_arn' in encryption:
17 if 'encryption_in_transit' in encryption:
18 transit = encryption['encryption_in_transit'][0]
19 if 'client_broker' in transit and transit['client_broker'][0] != 'TLS' or \
20 'in_cluster' in transit and transit['in_cluster'][0] is False:
21 return CheckResult.FAILED
22 return CheckResult.PASSED
23 return CheckResult.PASSED
24 return CheckResult.FAILED
25
26
27 check = MSKClusterEncryption()
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py b/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py
--- a/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py
+++ b/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py
@@ -11,16 +11,18 @@
super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)
def scan_resource_conf(self, conf):
+ # Note: As long as the 'encryption_info' block is specified, the cluster
+ # will be encrypted at rest even if 'encryption_at_rest_kms_key_arn' is not specified
+ # See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#encryption_at_rest_kms_key_arn
if 'encryption_info' in conf.keys():
encryption = conf['encryption_info'][0]
- if 'encryption_at_rest_kms_key_arn' in encryption:
- if 'encryption_in_transit' in encryption:
- transit = encryption['encryption_in_transit'][0]
- if 'client_broker' in transit and transit['client_broker'][0] != 'TLS' or \
- 'in_cluster' in transit and transit['in_cluster'][0] is False:
- return CheckResult.FAILED
- return CheckResult.PASSED
+ if 'encryption_in_transit' in encryption:
+ transit = encryption['encryption_in_transit'][0]
+ if 'client_broker' in transit and transit['client_broker'][0] != 'TLS' or \
+ 'in_cluster' in transit and transit['in_cluster'][0] is False:
+ return CheckResult.FAILED
return CheckResult.PASSED
+ return CheckResult.PASSED
return CheckResult.FAILED
| {"golden_diff": "diff --git a/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py b/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py\n--- a/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py\n+++ b/checkov/terraform/checks/resource/aws/MSKClusterEncryption.py\n@@ -11,16 +11,18 @@\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n \n def scan_resource_conf(self, conf):\n+ # Note: As long as the 'encryption_info' block is specified, the cluster\n+ # will be encrypted at rest even if 'encryption_at_rest_kms_key_arn' is not specified\n+ # See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#encryption_at_rest_kms_key_arn\n if 'encryption_info' in conf.keys():\n encryption = conf['encryption_info'][0]\n- if 'encryption_at_rest_kms_key_arn' in encryption:\n- if 'encryption_in_transit' in encryption:\n- transit = encryption['encryption_in_transit'][0]\n- if 'client_broker' in transit and transit['client_broker'][0] != 'TLS' or \\\n- 'in_cluster' in transit and transit['in_cluster'][0] is False:\n- return CheckResult.FAILED\n- return CheckResult.PASSED\n+ if 'encryption_in_transit' in encryption:\n+ transit = encryption['encryption_in_transit'][0]\n+ if 'client_broker' in transit and transit['client_broker'][0] != 'TLS' or \\\n+ 'in_cluster' in transit and transit['in_cluster'][0] is False:\n+ return CheckResult.FAILED\n return CheckResult.PASSED\n+ return CheckResult.PASSED\n return CheckResult.FAILED\n", "issue": "False positive for check 'MSKClusterEncryption' (CKV_AWS_81)\n**Describe the bug**\r\nCKV_AWS_81 is reporting MSK clusters as unencrypted at rest while they are encrypted at rest.\r\n\r\n**To Reproduce**\r\nExample Terraform code producing a cluster with encryption (it will use the default KMS key as documented in https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#encryption_at_rest_kms_key_arn)\r\n\r\n(slightly adapted from example on the msk_cluster resource docs)\r\n<details>\r\n <summary>Code</summary>\r\n\r\n```\r\nresource \"aws_vpc\" \"vpc\" {\r\n cidr_block = \"192.168.0.0/22\"\r\n}\r\n\r\ndata \"aws_availability_zones\" \"azs\" {\r\n state = \"available\"\r\n}\r\n\r\nresource \"aws_subnet\" \"subnet_az1\" {\r\n availability_zone = data.aws_availability_zones.azs.names[0]\r\n cidr_block = \"192.168.0.0/24\"\r\n vpc_id = aws_vpc.vpc.id\r\n}\r\n\r\nresource \"aws_subnet\" \"subnet_az2\" {\r\n availability_zone = data.aws_availability_zones.azs.names[1]\r\n cidr_block = \"192.168.1.0/24\"\r\n vpc_id = aws_vpc.vpc.id\r\n}\r\n\r\nresource \"aws_subnet\" \"subnet_az3\" {\r\n availability_zone = data.aws_availability_zones.azs.names[2]\r\n cidr_block = \"192.168.2.0/24\"\r\n vpc_id = aws_vpc.vpc.id\r\n}\r\n\r\nresource \"aws_security_group\" \"sg\" {\r\n vpc_id = aws_vpc.vpc.id\r\n}\r\n\r\nresource \"aws_kms_key\" \"kms\" {\r\n description = \"example\"\r\n}\r\n\r\nresource \"aws_cloudwatch_log_group\" \"test\" {\r\n name = \"msk_broker_logs\"\r\n}\r\n\r\nresource \"aws_s3_bucket\" \"bucket\" {\r\n bucket = \"msk-broker-logs-bucket\"\r\n acl = \"private\"\r\n}\r\n\r\nresource \"aws_iam_role\" \"firehose_role\" {\r\n name = \"firehose_test_role\"\r\n\r\n assume_role_policy = <<EOF\r\n{\r\n\"Version\": \"2012-10-17\",\r\n\"Statement\": [\r\n {\r\n \"Action\": \"sts:AssumeRole\",\r\n \"Principal\": {\r\n \"Service\": \"firehose.amazonaws.com\"\r\n },\r\n \"Effect\": \"Allow\",\r\n \"Sid\": \"\"\r\n }\r\n 
]\r\n}\r\nEOF\r\n}\r\n\r\nresource \"aws_kinesis_firehose_delivery_stream\" \"test_stream\" {\r\n name = \"terraform-kinesis-firehose-msk-broker-logs-stream\"\r\n destination = \"s3\"\r\n\r\n s3_configuration {\r\n role_arn = aws_iam_role.firehose_role.arn\r\n bucket_arn = aws_s3_bucket.bucket.arn\r\n }\r\n\r\n tags = {\r\n LogDeliveryEnabled = \"placeholder\"\r\n }\r\n\r\n lifecycle {\r\n ignore_changes = [\r\n tags[\"LogDeliveryEnabled\"],\r\n ]\r\n }\r\n}\r\n\r\nresource \"aws_msk_cluster\" \"example\" {\r\n cluster_name = \"example\"\r\n kafka_version = \"2.4.1\"\r\n number_of_broker_nodes = 3\r\n\r\n broker_node_group_info {\r\n instance_type = \"kafka.m5.large\"\r\n ebs_volume_size = 1000\r\n client_subnets = [\r\n aws_subnet.subnet_az1.id,\r\n aws_subnet.subnet_az2.id,\r\n aws_subnet.subnet_az3.id,\r\n ]\r\n security_groups = [aws_security_group.sg.id]\r\n }\r\n\r\n encryption_info {\r\n encryption_in_transit { \r\n client_broker = \"TLS\"\r\n in_cluster = true \r\n }\r\n }\r\n\r\n open_monitoring {\r\n prometheus {\r\n jmx_exporter {\r\n enabled_in_broker = true\r\n }\r\n node_exporter {\r\n enabled_in_broker = true\r\n }\r\n }\r\n }\r\n\r\n logging_info {\r\n broker_logs {\r\n cloudwatch_logs {\r\n enabled = true\r\n log_group = aws_cloudwatch_log_group.test.name\r\n }\r\n firehose {\r\n enabled = true\r\n delivery_stream = aws_kinesis_firehose_delivery_stream.test_stream.name\r\n }\r\n s3 {\r\n enabled = true\r\n bucket = aws_s3_bucket.bucket.id\r\n prefix = \"logs/msk-\"\r\n }\r\n }\r\n }\r\n\r\n tags = {\r\n foo = \"bar\"\r\n }\r\n}\r\n\r\noutput \"zookeeper_connect_string\" {\r\n value = aws_msk_cluster.example.zookeeper_connect_string\r\n}\r\n\r\noutput \"bootstrap_brokers_tls\" {\r\n description = \"TLS connection host:port pairs\"\r\n value = aws_msk_cluster.example.bootstrap_brokers_tls\r\n}\r\n```\r\n\r\n</details>\r\n\r\n**Expected behavior**\r\nNo error is thrown as long as an encryption_info block is defined\r\n\r\n\r\n Checkov Version 1.0.669\r\n\n", "before_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass MSKClusterEncryption(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure MSK Cluster encryption in rest and transit is enabled\"\n id = \"CKV_AWS_81\"\n supported_resources = ['aws_msk_cluster']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n if 'encryption_info' in conf.keys():\n encryption = conf['encryption_info'][0]\n if 'encryption_at_rest_kms_key_arn' in encryption:\n if 'encryption_in_transit' in encryption:\n transit = encryption['encryption_in_transit'][0]\n if 'client_broker' in transit and transit['client_broker'][0] != 'TLS' or \\\n 'in_cluster' in transit and transit['in_cluster'][0] is False:\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = MSKClusterEncryption()\n", "path": "checkov/terraform/checks/resource/aws/MSKClusterEncryption.py"}], "after_files": [{"content": "from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.terraform.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass MSKClusterEncryption(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure MSK Cluster encryption in rest and transit is enabled\"\n id = \"CKV_AWS_81\"\n 
supported_resources = ['aws_msk_cluster']\n categories = [CheckCategories.ENCRYPTION]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n # Note: As long as the 'encryption_info' block is specified, the cluster\n # will be encrypted at rest even if 'encryption_at_rest_kms_key_arn' is not specified\n # See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/msk_cluster#encryption_at_rest_kms_key_arn\n if 'encryption_info' in conf.keys():\n encryption = conf['encryption_info'][0]\n if 'encryption_in_transit' in encryption:\n transit = encryption['encryption_in_transit'][0]\n if 'client_broker' in transit and transit['client_broker'][0] != 'TLS' or \\\n 'in_cluster' in transit and transit['in_cluster'][0] is False:\n return CheckResult.FAILED\n return CheckResult.PASSED\n return CheckResult.PASSED\n return CheckResult.FAILED\n\n\ncheck = MSKClusterEncryption()\n", "path": "checkov/terraform/checks/resource/aws/MSKClusterEncryption.py"}]} | 1,664 | 414 |
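For quick reference, here is a standalone sketch of the decision logic in the patch above. Plain strings stand in for checkov's `CheckResult` enum and a hand-built dict stands in for the parsed HCL config; both stand-ins are assumptions for illustration, not checkov's actual API.

```python
# Sketch of the patched MSK check: an encryption_info block alone is enough to pass,
# since encryption at rest falls back to the default KMS key when no ARN is given.
def scan_msk_encryption(conf: dict) -> str:
    if "encryption_info" in conf:
        encryption = conf["encryption_info"][0]
        if "encryption_in_transit" in encryption:
            transit = encryption["encryption_in_transit"][0]
            weak_transit = (
                ("client_broker" in transit and transit["client_broker"][0] != "TLS")
                or ("in_cluster" in transit and transit["in_cluster"][0] is False)
            )
            if weak_transit:
                return "FAILED"
        return "PASSED"
    return "FAILED"


if __name__ == "__main__":
    conf = {
        "encryption_info": [
            {"encryption_in_transit": [{"client_broker": ["TLS"], "in_cluster": [True]}]}
        ]
    }
    print(scan_msk_encryption(conf))  # PASSED, even without encryption_at_rest_kms_key_arn
    print(scan_msk_encryption({}))    # FAILED, no encryption_info block at all
```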
gh_patches_debug_19960 | rasdani/github-patches | git_diff | microsoft__Qcodes-565 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
visa_handle.clear on instantiation causes some instruments to freeze
The use of visa_handle.clear on the instantiation of a VISA instrument causes some instruments to lock up. In particular, I've noticed that for SERIAL instruments this command spews some garbage on the serial port, which requires the instrument to be reset before it is possible to use it again.
### Steps to reproduce
1. Plug in a buggy instrument and instantiate it.
### Actual behaviour
The instrument freezes
### System
Win 7
Using the Harvard DecaDAC
2b6d72b
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `qcodes/instrument/visa.py`
Content:
```
1 """Visa instrument driver based on pyvisa."""
2 import visa
3
4 from .base import Instrument
5 import qcodes.utils.validators as vals
6
7
8 class VisaInstrument(Instrument):
9
10 """
11 Base class for all instruments using visa connections.
12
13 Args:
14 name (str): What this instrument is called locally.
15
16 address (str): The visa resource name to use to connect.
17 Optionally includes '@<backend>' at the end. For example,
18 'ASRL2' will open COM2 with the default NI backend, but
19 'ASRL2@py' will open COM2 using pyvisa-py. Note that qcodes
20 does not install (or even require) ANY backends, it is up to
21 the user to do that. see eg:
22 http://pyvisa.readthedocs.org/en/stable/names.html
23
24 timeout (number): seconds to allow for responses. Default 5.
25
26 terminator: Read termination character(s) to look for. Default ''.
27
28 metadata (Optional[Dict]): additional static metadata to add to this
29 instrument's JSON snapshot.
30
31 See help for ``qcodes.Instrument`` for additional information on writing
32 instrument subclasses.
33
34 Attributes:
35 visa_handle (pyvisa.resources.Resource): The communication channel.
36 """
37
38 def __init__(self, name, address=None, timeout=5, terminator='', **kwargs):
39 super().__init__(name, **kwargs)
40
41 self.add_parameter('timeout',
42 get_cmd=self._get_visa_timeout,
43 set_cmd=self._set_visa_timeout,
44 unit='s',
45 vals=vals.MultiType(vals.Numbers(min_value=0),
46 vals.Enum(None)))
47
48 self.set_address(address)
49 self.set_terminator(terminator)
50 self.timeout.set(timeout)
51
52 def set_address(self, address):
53 """
54 Change the address for this instrument.
55
56 Args:
57 address: The visa resource name to use to connect.
58 Optionally includes '@<backend>' at the end. For example,
59 'ASRL2' will open COM2 with the default NI backend, but
60 'ASRL2@py' will open COM2 using pyvisa-py. Note that qcodes
61 does not install (or even require) ANY backends, it is up to
62 the user to do that.
63 see eg: http://pyvisa.readthedocs.org/en/stable/names.html
64 """
65 # in case we're changing the address - close the old handle first
66 if getattr(self, 'visa_handle', None):
67 self.visa_handle.close()
68
69 if address and '@' in address:
70 address, visa_library = address.split('@')
71 resource_manager = visa.ResourceManager('@' + visa_library)
72 else:
73 resource_manager = visa.ResourceManager()
74
75 self.visa_handle = resource_manager.open_resource(address)
76
77 self.visa_handle.clear()
78 self._address = address
79
80 def set_terminator(self, terminator):
81 r"""
82 Change the read terminator to use.
83
84 Args:
85 terminator (str): Character(s) to look for at the end of a read.
86 eg. '\r\n'.
87 """
88 self.visa_handle.read_termination = terminator
89 self._terminator = terminator
90
91 def _set_visa_timeout(self, timeout):
92 if timeout is None:
93 self.visa_handle.timeout = None
94 else:
95 # pyvisa uses milliseconds but we use seconds
96 self.visa_handle.timeout = timeout * 1000.0
97
98 def _get_visa_timeout(self):
99 timeout_ms = self.visa_handle.timeout
100 if timeout_ms is None:
101 return None
102 else:
103 # pyvisa uses milliseconds but we use seconds
104 return timeout_ms / 1000
105
106 def close(self):
107 """Disconnect and irreversibly tear down the instrument."""
108 if getattr(self, 'visa_handle', None):
109 self.visa_handle.close()
110 super().close()
111
112 def check_error(self, ret_code):
113 """
114 Default error checking, raises an error if return code !=0.
115
116 Does not differentiate between warnings or specific error messages.
117 Override this function in your driver if you want to add specific
118 error messages.
119
120 Args:
121 ret_code (int): A Visa error code. See eg:
122 https://github.com/hgrecco/pyvisa/blob/master/pyvisa/errors.py
123
124 Raises:
125 visa.VisaIOError: if ``ret_code`` indicates a communication
126 problem.
127 """
128 if ret_code != 0:
129 raise visa.VisaIOError(ret_code)
130
131 def write_raw(self, cmd):
132 """
133 Low-level interface to ``visa_handle.write``.
134
135 Args:
136 cmd (str): The command to send to the instrument.
137 """
138 nr_bytes_written, ret_code = self.visa_handle.write(cmd)
139 self.check_error(ret_code)
140
141 def ask_raw(self, cmd):
142 """
143 Low-level interface to ``visa_handle.ask``.
144
145 Args:
146 cmd (str): The command to send to the instrument.
147
148 Returns:
149 str: The instrument's response.
150 """
151 return self.visa_handle.ask(cmd)
152
153 def snapshot_base(self, update=False):
154 """
155 State of the instrument as a JSON-compatible dict.
156
157 Args:
158 update (bool): If True, update the state by querying the
159 instrument. If False, just use the latest values in memory.
160
161 Returns:
162 dict: base snapshot
163 """
164 snap = super().snapshot_base(update=update)
165
166 snap['address'] = self._address
167 snap['terminator'] = self._terminator
168 snap['timeout'] = self.timeout.get()
169
170 return snap
171
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/qcodes/instrument/visa.py b/qcodes/instrument/visa.py
--- a/qcodes/instrument/visa.py
+++ b/qcodes/instrument/visa.py
@@ -1,10 +1,11 @@
"""Visa instrument driver based on pyvisa."""
import visa
+import pyvisa.constants as vi_const
+import pyvisa.resources
from .base import Instrument
import qcodes.utils.validators as vals
-
class VisaInstrument(Instrument):
"""
@@ -74,7 +75,13 @@
self.visa_handle = resource_manager.open_resource(address)
- self.visa_handle.clear()
+ # Serial instruments have a separate flush method to clear their buffers
+ # which behaves differently to clear. This is particularly important
+ # for instruments which do not support SCPI commands.
+ if isinstance(self.visa_handle, pyvisa.resources.SerialInstrument):
+ self.visa_handle.flush(vi_const.VI_READ_BUF_DISCARD | vi_const.VI_WRITE_BUF_DISCARD)
+ else:
+ self.visa_handle.clear()
self._address = address
def set_terminator(self, terminator):
| {"golden_diff": "diff --git a/qcodes/instrument/visa.py b/qcodes/instrument/visa.py\n--- a/qcodes/instrument/visa.py\n+++ b/qcodes/instrument/visa.py\n@@ -1,10 +1,11 @@\n \"\"\"Visa instrument driver based on pyvisa.\"\"\"\n import visa\n+import pyvisa.constants as vi_const\n+import pyvisa.resources\n \n from .base import Instrument\n import qcodes.utils.validators as vals\n \n-\n class VisaInstrument(Instrument):\n \n \"\"\"\n@@ -74,7 +75,13 @@\n \n self.visa_handle = resource_manager.open_resource(address)\n \n- self.visa_handle.clear()\n+ # Serial instruments have a separate flush method to clear their buffers\n+ # which behaves differently to clear. This is particularly important\n+ # for instruments which do not support SCPI commands.\n+ if isinstance(self.visa_handle, pyvisa.resources.SerialInstrument):\n+ self.visa_handle.flush(vi_const.VI_READ_BUF_DISCARD | vi_const.VI_WRITE_BUF_DISCARD)\n+ else:\n+ self.visa_handle.clear()\n self._address = address\n \n def set_terminator(self, terminator):\n", "issue": "visa_handle.clear on instantiation causes some instruments to freeze\nThe use of visa_handle.clear on the instantiation of a VISA instrument causes some instruments to lock up. In particular I've noticed for SERIAL instruments, this command spews some garbage on the serial port that requires the instrument to be reset before it is possible to use it again.\r\n\r\n### Steps to reproduce\r\n1. Plug in a buggy instrument and instantiate an instance of it.\r\n\r\n### Actual behaviour\r\nThe instrument freezes\r\n\r\n### System\r\nWin 7\r\nUsing the Harvard DecaDAC\r\n2b6d72b\n", "before_files": [{"content": "\"\"\"Visa instrument driver based on pyvisa.\"\"\"\nimport visa\n\nfrom .base import Instrument\nimport qcodes.utils.validators as vals\n\n\nclass VisaInstrument(Instrument):\n\n \"\"\"\n Base class for all instruments using visa connections.\n\n Args:\n name (str): What this instrument is called locally.\n\n address (str): The visa resource name to use to connect.\n Optionally includes '@<backend>' at the end. For example,\n 'ASRL2' will open COM2 with the default NI backend, but\n 'ASRL2@py' will open COM2 using pyvisa-py. Note that qcodes\n does not install (or even require) ANY backends, it is up to\n the user to do that. see eg:\n http://pyvisa.readthedocs.org/en/stable/names.html\n\n timeout (number): seconds to allow for responses. Default 5.\n\n terminator: Read termination character(s) to look for. Default ''.\n\n metadata (Optional[Dict]): additional static metadata to add to this\n instrument's JSON snapshot.\n\n See help for ``qcodes.Instrument`` for additional information on writing\n instrument subclasses.\n\n Attributes:\n visa_handle (pyvisa.resources.Resource): The communication channel.\n \"\"\"\n\n def __init__(self, name, address=None, timeout=5, terminator='', **kwargs):\n super().__init__(name, **kwargs)\n\n self.add_parameter('timeout',\n get_cmd=self._get_visa_timeout,\n set_cmd=self._set_visa_timeout,\n unit='s',\n vals=vals.MultiType(vals.Numbers(min_value=0),\n vals.Enum(None)))\n\n self.set_address(address)\n self.set_terminator(terminator)\n self.timeout.set(timeout)\n\n def set_address(self, address):\n \"\"\"\n Change the address for this instrument.\n\n Args:\n address: The visa resource name to use to connect.\n Optionally includes '@<backend>' at the end. For example,\n 'ASRL2' will open COM2 with the default NI backend, but\n 'ASRL2@py' will open COM2 using pyvisa-py. 
Note that qcodes\n does not install (or even require) ANY backends, it is up to\n the user to do that.\n see eg: http://pyvisa.readthedocs.org/en/stable/names.html\n \"\"\"\n # in case we're changing the address - close the old handle first\n if getattr(self, 'visa_handle', None):\n self.visa_handle.close()\n\n if address and '@' in address:\n address, visa_library = address.split('@')\n resource_manager = visa.ResourceManager('@' + visa_library)\n else:\n resource_manager = visa.ResourceManager()\n\n self.visa_handle = resource_manager.open_resource(address)\n\n self.visa_handle.clear()\n self._address = address\n\n def set_terminator(self, terminator):\n r\"\"\"\n Change the read terminator to use.\n\n Args:\n terminator (str): Character(s) to look for at the end of a read.\n eg. '\\r\\n'.\n \"\"\"\n self.visa_handle.read_termination = terminator\n self._terminator = terminator\n\n def _set_visa_timeout(self, timeout):\n if timeout is None:\n self.visa_handle.timeout = None\n else:\n # pyvisa uses milliseconds but we use seconds\n self.visa_handle.timeout = timeout * 1000.0\n\n def _get_visa_timeout(self):\n timeout_ms = self.visa_handle.timeout\n if timeout_ms is None:\n return None\n else:\n # pyvisa uses milliseconds but we use seconds\n return timeout_ms / 1000\n\n def close(self):\n \"\"\"Disconnect and irreversibly tear down the instrument.\"\"\"\n if getattr(self, 'visa_handle', None):\n self.visa_handle.close()\n super().close()\n\n def check_error(self, ret_code):\n \"\"\"\n Default error checking, raises an error if return code !=0.\n\n Does not differentiate between warnings or specific error messages.\n Override this function in your driver if you want to add specific\n error messages.\n\n Args:\n ret_code (int): A Visa error code. See eg:\n https://github.com/hgrecco/pyvisa/blob/master/pyvisa/errors.py\n\n Raises:\n visa.VisaIOError: if ``ret_code`` indicates a communication\n problem.\n \"\"\"\n if ret_code != 0:\n raise visa.VisaIOError(ret_code)\n\n def write_raw(self, cmd):\n \"\"\"\n Low-level interface to ``visa_handle.write``.\n\n Args:\n cmd (str): The command to send to the instrument.\n \"\"\"\n nr_bytes_written, ret_code = self.visa_handle.write(cmd)\n self.check_error(ret_code)\n\n def ask_raw(self, cmd):\n \"\"\"\n Low-level interface to ``visa_handle.ask``.\n\n Args:\n cmd (str): The command to send to the instrument.\n\n Returns:\n str: The instrument's response.\n \"\"\"\n return self.visa_handle.ask(cmd)\n\n def snapshot_base(self, update=False):\n \"\"\"\n State of the instrument as a JSON-compatible dict.\n\n Args:\n update (bool): If True, update the state by querying the\n instrument. If False, just use the latest values in memory.\n\n Returns:\n dict: base snapshot\n \"\"\"\n snap = super().snapshot_base(update=update)\n\n snap['address'] = self._address\n snap['terminator'] = self._terminator\n snap['timeout'] = self.timeout.get()\n\n return snap\n", "path": "qcodes/instrument/visa.py"}], "after_files": [{"content": "\"\"\"Visa instrument driver based on pyvisa.\"\"\"\nimport visa\nimport pyvisa.constants as vi_const\nimport pyvisa.resources\n\nfrom .base import Instrument\nimport qcodes.utils.validators as vals\n\nclass VisaInstrument(Instrument):\n\n \"\"\"\n Base class for all instruments using visa connections.\n\n Args:\n name (str): What this instrument is called locally.\n\n address (str): The visa resource name to use to connect.\n Optionally includes '@<backend>' at the end. 
For example,\n 'ASRL2' will open COM2 with the default NI backend, but\n 'ASRL2@py' will open COM2 using pyvisa-py. Note that qcodes\n does not install (or even require) ANY backends, it is up to\n the user to do that. see eg:\n http://pyvisa.readthedocs.org/en/stable/names.html\n\n timeout (number): seconds to allow for responses. Default 5.\n\n terminator: Read termination character(s) to look for. Default ''.\n\n metadata (Optional[Dict]): additional static metadata to add to this\n instrument's JSON snapshot.\n\n See help for ``qcodes.Instrument`` for additional information on writing\n instrument subclasses.\n\n Attributes:\n visa_handle (pyvisa.resources.Resource): The communication channel.\n \"\"\"\n\n def __init__(self, name, address=None, timeout=5, terminator='', **kwargs):\n super().__init__(name, **kwargs)\n\n self.add_parameter('timeout',\n get_cmd=self._get_visa_timeout,\n set_cmd=self._set_visa_timeout,\n unit='s',\n vals=vals.MultiType(vals.Numbers(min_value=0),\n vals.Enum(None)))\n\n self.set_address(address)\n self.set_terminator(terminator)\n self.timeout.set(timeout)\n\n def set_address(self, address):\n \"\"\"\n Change the address for this instrument.\n\n Args:\n address: The visa resource name to use to connect.\n Optionally includes '@<backend>' at the end. For example,\n 'ASRL2' will open COM2 with the default NI backend, but\n 'ASRL2@py' will open COM2 using pyvisa-py. Note that qcodes\n does not install (or even require) ANY backends, it is up to\n the user to do that.\n see eg: http://pyvisa.readthedocs.org/en/stable/names.html\n \"\"\"\n # in case we're changing the address - close the old handle first\n if getattr(self, 'visa_handle', None):\n self.visa_handle.close()\n\n if address and '@' in address:\n address, visa_library = address.split('@')\n resource_manager = visa.ResourceManager('@' + visa_library)\n else:\n resource_manager = visa.ResourceManager()\n\n self.visa_handle = resource_manager.open_resource(address)\n\n # Serial instruments have a separate flush method to clear their buffers\n # which behaves differently to clear. This is particularly important\n # for instruments which do not support SCPI commands.\n if isinstance(self.visa_handle, pyvisa.resources.SerialInstrument):\n self.visa_handle.flush(vi_const.VI_READ_BUF_DISCARD | vi_const.VI_WRITE_BUF_DISCARD)\n else:\n self.visa_handle.clear()\n self._address = address\n\n def set_terminator(self, terminator):\n r\"\"\"\n Change the read terminator to use.\n\n Args:\n terminator (str): Character(s) to look for at the end of a read.\n eg. 
'\\r\\n'.\n \"\"\"\n self.visa_handle.read_termination = terminator\n self._terminator = terminator\n\n def _set_visa_timeout(self, timeout):\n if timeout is None:\n self.visa_handle.timeout = None\n else:\n # pyvisa uses milliseconds but we use seconds\n self.visa_handle.timeout = timeout * 1000.0\n\n def _get_visa_timeout(self):\n timeout_ms = self.visa_handle.timeout\n if timeout_ms is None:\n return None\n else:\n # pyvisa uses milliseconds but we use seconds\n return timeout_ms / 1000\n\n def close(self):\n \"\"\"Disconnect and irreversibly tear down the instrument.\"\"\"\n if getattr(self, 'visa_handle', None):\n self.visa_handle.close()\n super().close()\n\n def check_error(self, ret_code):\n \"\"\"\n Default error checking, raises an error if return code !=0.\n\n Does not differentiate between warnings or specific error messages.\n Override this function in your driver if you want to add specific\n error messages.\n\n Args:\n ret_code (int): A Visa error code. See eg:\n https://github.com/hgrecco/pyvisa/blob/master/pyvisa/errors.py\n\n Raises:\n visa.VisaIOError: if ``ret_code`` indicates a communication\n problem.\n \"\"\"\n if ret_code != 0:\n raise visa.VisaIOError(ret_code)\n\n def write_raw(self, cmd):\n \"\"\"\n Low-level interface to ``visa_handle.write``.\n\n Args:\n cmd (str): The command to send to the instrument.\n \"\"\"\n nr_bytes_written, ret_code = self.visa_handle.write(cmd)\n self.check_error(ret_code)\n\n def ask_raw(self, cmd):\n \"\"\"\n Low-level interface to ``visa_handle.ask``.\n\n Args:\n cmd (str): The command to send to the instrument.\n\n Returns:\n str: The instrument's response.\n \"\"\"\n return self.visa_handle.ask(cmd)\n\n def snapshot_base(self, update=False):\n \"\"\"\n State of the instrument as a JSON-compatible dict.\n\n Args:\n update (bool): If True, update the state by querying the\n instrument. If False, just use the latest values in memory.\n\n Returns:\n dict: base snapshot\n \"\"\"\n snap = super().snapshot_base(update=update)\n\n snap['address'] = self._address\n snap['terminator'] = self._terminator\n snap['timeout'] = self.timeout.get()\n\n return snap\n", "path": "qcodes/instrument/visa.py"}]} | 2,009 | 251 |
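The patch above boils down to one branch on the resource type. A minimal sketch of that branch as a helper function follows; it assumes pyvisa is installed and, like the original driver code, it can only be exercised against a real VISA backend and instrument, so the `__main__` block and the "ASRL2" address are illustrative placeholders.

```python
# Sketch of the serial-aware buffer clearing from the qcodes patch: serial resources
# get flush() with discard flags, everything else keeps the plain clear() call.
import pyvisa
import pyvisa.constants as vi_const
import pyvisa.resources


def safe_clear(handle):
    """Clear a VISA resource without sending device clear to serial instruments."""
    if isinstance(handle, pyvisa.resources.SerialInstrument):
        handle.flush(vi_const.VI_READ_BUF_DISCARD | vi_const.VI_WRITE_BUF_DISCARD)
    else:
        handle.clear()


if __name__ == "__main__":
    # Hypothetical resource name; replace with a real one to try this out.
    rm = pyvisa.ResourceManager()
    handle = rm.open_resource("ASRL2")
    safe_clear(handle)
```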
gh_patches_debug_25982 | rasdani/github-patches | git_diff | digitalfabrik__integreat-cms-1241 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Language Tree isn't handled correctly by cache in Page Tree
### Describe the Bug
So far, our table rows aren't consistent after we delete a language tree node and recreate it. We should probably empty the cacheops cache after we delete a language tree node.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `integreat_cms/cms/forms/language_tree/language_tree_node_form.py`
Content:
```
1 import logging
2
3 from django import forms
4 from django.utils.text import capfirst
5 from django.utils.translation import ugettext_lazy as _
6
7 from ..custom_model_form import CustomModelForm
8 from ..custom_tree_node_form import CustomTreeNodeForm
9 from ...models import Language, LanguageTreeNode
10
11
12 logger = logging.getLogger(__name__)
13
14
15 class LanguageTreeNodeForm(CustomModelForm, CustomTreeNodeForm):
16 """
17 Form for creating and modifying language tree node objects
18 """
19
20 parent = forms.ModelChoiceField(
21 queryset=LanguageTreeNode.objects.all(),
22 required=False,
23 label=capfirst(LanguageTreeNode._meta.get_field("parent").verbose_name),
24 )
25
26 class Meta:
27 """
28 This class contains additional meta configuration of the form class, see the :class:`django.forms.ModelForm`
29 for more information.
30 """
31
32 #: The model of this :class:`django.forms.ModelForm`
33 model = LanguageTreeNode
34 #: The fields of the model which should be handled by this form
35 fields = ["language", "visible", "active"]
36
37 def __init__(self, **kwargs):
38 r"""
39 Initialize language tree node form
40
41 :param \**kwargs: The supplied keyword arguments
42 :type \**kwargs: dict
43 """
44
45 if "data" in kwargs:
46 # Copy QueryDict because it is immutable
47 data = kwargs.pop("data").copy()
48 # Use the parent node as value for the ref node
49 data["_ref_node_id"] = data["parent"]
50 data["_position"] = "first-child"
51 # Set the kwargs to updated POST data again
52 kwargs["data"] = data
53
54 # Instantiate CustomModelForm
55 super().__init__(**kwargs)
56
57 parent_queryset = self.instance.region.language_tree_nodes
58
59 if self.instance.id:
60 descendant_ids = [
61 descendant.id
62 for descendant in self.instance.get_cached_descendants(
63 include_self=True
64 )
65 ]
66 parent_queryset = parent_queryset.exclude(id__in=descendant_ids)
67 self.fields["parent"].initial = self.instance.parent_id
68 excluded_languages = [
69 language.id
70 for language in self.instance.region.languages
71 if language != self.instance.language
72 ]
73 else:
74 excluded_languages = [
75 language.id for language in self.instance.region.languages
76 ]
77
78 # limit possible parents to nodes of current region
79 self.fields["parent"].queryset = parent_queryset
80 self.fields["_ref_node_id"].choices = self.fields["parent"].choices
81 # limit possible languages to those which are not yet included in the tree
82 self.fields["language"].queryset = Language.objects.exclude(
83 id__in=excluded_languages
84 )
85
86 def clean(self):
87 """
88 Validate form fields which depend on each other, see :meth:`django.forms.Form.clean`:
89 Don't allow multiple root nodes for one region:
90 If self is a root node and the region already has a default language, raise a
91 :class:`~django.core.exceptions.ValidationError`.
92
93 :return: The cleaned form data
94 :rtype: dict
95 """
96 cleaned_data = super().clean()
97 default_language = self.instance.region.default_language
98 # There are two cases in which this error is thrown.
99 # Both cases include that the parent field is None.
100 # 1. The instance does exist:
101 # - The default language is different from the instance language
102 # 2. The instance does not exist:
103 # - The default language exists
104 if not cleaned_data.get("parent") and (
105 (self.instance.id and default_language != self.instance.language)
106 or (not self.instance.id and default_language)
107 ):
108 self.add_error(
109 "parent",
110 forms.ValidationError(
111 _(
112 "This region has already a default language."
113 "Please specify a source language for this language."
114 ),
115 code="invalid",
116 ),
117 )
118 logger.debug(
119 "LanguageTreeNodeForm validated [2] with cleaned data %r", cleaned_data
120 )
121 return cleaned_data
122
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/integreat_cms/cms/forms/language_tree/language_tree_node_form.py b/integreat_cms/cms/forms/language_tree/language_tree_node_form.py
--- a/integreat_cms/cms/forms/language_tree/language_tree_node_form.py
+++ b/integreat_cms/cms/forms/language_tree/language_tree_node_form.py
@@ -4,6 +4,8 @@
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
+from cacheops import invalidate_obj
+
from ..custom_model_form import CustomModelForm
from ..custom_tree_node_form import CustomTreeNodeForm
from ...models import Language, LanguageTreeNode
@@ -119,3 +121,25 @@
"LanguageTreeNodeForm validated [2] with cleaned data %r", cleaned_data
)
return cleaned_data
+
+ def save(self, commit=True):
+ """
+ This method extends the default ``save()``-method of the base :class:`~django.forms.ModelForm` to flush
+ the cache after commiting.
+
+ :param commit: Whether or not the changes should be written to the database
+ :type commit: bool
+
+ :return: The saved page translation object
+ :rtype: ~integreat_cms.cms.models.pages.page_translation.PageTranslation
+ """
+ # Save CustomModelForm and flush Cache
+ result = super().save(commit=commit)
+
+ for page in self.instance.region.pages.all():
+ invalidate_obj(page)
+ for poi in self.instance.region.pois.all():
+ invalidate_obj(poi)
+ for event in self.instance.region.events.all():
+ invalidate_obj(event)
+ return result
| {"golden_diff": "diff --git a/integreat_cms/cms/forms/language_tree/language_tree_node_form.py b/integreat_cms/cms/forms/language_tree/language_tree_node_form.py\n--- a/integreat_cms/cms/forms/language_tree/language_tree_node_form.py\n+++ b/integreat_cms/cms/forms/language_tree/language_tree_node_form.py\n@@ -4,6 +4,8 @@\n from django.utils.text import capfirst\n from django.utils.translation import ugettext_lazy as _\n \n+from cacheops import invalidate_obj\n+\n from ..custom_model_form import CustomModelForm\n from ..custom_tree_node_form import CustomTreeNodeForm\n from ...models import Language, LanguageTreeNode\n@@ -119,3 +121,25 @@\n \"LanguageTreeNodeForm validated [2] with cleaned data %r\", cleaned_data\n )\n return cleaned_data\n+\n+ def save(self, commit=True):\n+ \"\"\"\n+ This method extends the default ``save()``-method of the base :class:`~django.forms.ModelForm` to flush\n+ the cache after commiting.\n+\n+ :param commit: Whether or not the changes should be written to the database\n+ :type commit: bool\n+\n+ :return: The saved page translation object\n+ :rtype: ~integreat_cms.cms.models.pages.page_translation.PageTranslation\n+ \"\"\"\n+ # Save CustomModelForm and flush Cache\n+ result = super().save(commit=commit)\n+\n+ for page in self.instance.region.pages.all():\n+ invalidate_obj(page)\n+ for poi in self.instance.region.pois.all():\n+ invalidate_obj(poi)\n+ for event in self.instance.region.events.all():\n+ invalidate_obj(event)\n+ return result\n", "issue": "Language Tree isn't handled correctly by cache in Page Tree\n### Describe the Bug\r\nSo far, our Table rows aren't consistent after we delete a language tree node and recreate it. We should probably empty the cacheops cache after we delete a language tree node.\r\n\n", "before_files": [{"content": "import logging\n\nfrom django import forms\nfrom django.utils.text import capfirst\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom ..custom_model_form import CustomModelForm\nfrom ..custom_tree_node_form import CustomTreeNodeForm\nfrom ...models import Language, LanguageTreeNode\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass LanguageTreeNodeForm(CustomModelForm, CustomTreeNodeForm):\n \"\"\"\n Form for creating and modifying language tree node objects\n \"\"\"\n\n parent = forms.ModelChoiceField(\n queryset=LanguageTreeNode.objects.all(),\n required=False,\n label=capfirst(LanguageTreeNode._meta.get_field(\"parent\").verbose_name),\n )\n\n class Meta:\n \"\"\"\n This class contains additional meta configuration of the form class, see the :class:`django.forms.ModelForm`\n for more information.\n \"\"\"\n\n #: The model of this :class:`django.forms.ModelForm`\n model = LanguageTreeNode\n #: The fields of the model which should be handled by this form\n fields = [\"language\", \"visible\", \"active\"]\n\n def __init__(self, **kwargs):\n r\"\"\"\n Initialize language tree node form\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n \"\"\"\n\n if \"data\" in kwargs:\n # Copy QueryDict because it is immutable\n data = kwargs.pop(\"data\").copy()\n # Use the parent node as value for the ref node\n data[\"_ref_node_id\"] = data[\"parent\"]\n data[\"_position\"] = \"first-child\"\n # Set the kwargs to updated POST data again\n kwargs[\"data\"] = data\n\n # Instantiate CustomModelForm\n super().__init__(**kwargs)\n\n parent_queryset = self.instance.region.language_tree_nodes\n\n if self.instance.id:\n descendant_ids = [\n descendant.id\n for descendant in 
self.instance.get_cached_descendants(\n include_self=True\n )\n ]\n parent_queryset = parent_queryset.exclude(id__in=descendant_ids)\n self.fields[\"parent\"].initial = self.instance.parent_id\n excluded_languages = [\n language.id\n for language in self.instance.region.languages\n if language != self.instance.language\n ]\n else:\n excluded_languages = [\n language.id for language in self.instance.region.languages\n ]\n\n # limit possible parents to nodes of current region\n self.fields[\"parent\"].queryset = parent_queryset\n self.fields[\"_ref_node_id\"].choices = self.fields[\"parent\"].choices\n # limit possible languages to those which are not yet included in the tree\n self.fields[\"language\"].queryset = Language.objects.exclude(\n id__in=excluded_languages\n )\n\n def clean(self):\n \"\"\"\n Validate form fields which depend on each other, see :meth:`django.forms.Form.clean`:\n Don't allow multiple root nodes for one region:\n If self is a root node and the region already has a default language, raise a\n :class:`~django.core.exceptions.ValidationError`.\n\n :return: The cleaned form data\n :rtype: dict\n \"\"\"\n cleaned_data = super().clean()\n default_language = self.instance.region.default_language\n # There are two cases in which this error is thrown.\n # Both cases include that the parent field is None.\n # 1. The instance does exist:\n # - The default language is different from the instance language\n # 2. The instance does not exist:\n # - The default language exists\n if not cleaned_data.get(\"parent\") and (\n (self.instance.id and default_language != self.instance.language)\n or (not self.instance.id and default_language)\n ):\n self.add_error(\n \"parent\",\n forms.ValidationError(\n _(\n \"This region has already a default language.\"\n \"Please specify a source language for this language.\"\n ),\n code=\"invalid\",\n ),\n )\n logger.debug(\n \"LanguageTreeNodeForm validated [2] with cleaned data %r\", cleaned_data\n )\n return cleaned_data\n", "path": "integreat_cms/cms/forms/language_tree/language_tree_node_form.py"}], "after_files": [{"content": "import logging\n\nfrom django import forms\nfrom django.utils.text import capfirst\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom cacheops import invalidate_obj\n\nfrom ..custom_model_form import CustomModelForm\nfrom ..custom_tree_node_form import CustomTreeNodeForm\nfrom ...models import Language, LanguageTreeNode\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass LanguageTreeNodeForm(CustomModelForm, CustomTreeNodeForm):\n \"\"\"\n Form for creating and modifying language tree node objects\n \"\"\"\n\n parent = forms.ModelChoiceField(\n queryset=LanguageTreeNode.objects.all(),\n required=False,\n label=capfirst(LanguageTreeNode._meta.get_field(\"parent\").verbose_name),\n )\n\n class Meta:\n \"\"\"\n This class contains additional meta configuration of the form class, see the :class:`django.forms.ModelForm`\n for more information.\n \"\"\"\n\n #: The model of this :class:`django.forms.ModelForm`\n model = LanguageTreeNode\n #: The fields of the model which should be handled by this form\n fields = [\"language\", \"visible\", \"active\"]\n\n def __init__(self, **kwargs):\n r\"\"\"\n Initialize language tree node form\n\n :param \\**kwargs: The supplied keyword arguments\n :type \\**kwargs: dict\n \"\"\"\n\n if \"data\" in kwargs:\n # Copy QueryDict because it is immutable\n data = kwargs.pop(\"data\").copy()\n # Use the parent node as value for the ref node\n data[\"_ref_node_id\"] = 
data[\"parent\"]\n data[\"_position\"] = \"first-child\"\n # Set the kwargs to updated POST data again\n kwargs[\"data\"] = data\n\n # Instantiate CustomModelForm\n super().__init__(**kwargs)\n\n parent_queryset = self.instance.region.language_tree_nodes\n\n if self.instance.id:\n descendant_ids = [\n descendant.id\n for descendant in self.instance.get_cached_descendants(\n include_self=True\n )\n ]\n parent_queryset = parent_queryset.exclude(id__in=descendant_ids)\n self.fields[\"parent\"].initial = self.instance.parent_id\n excluded_languages = [\n language.id\n for language in self.instance.region.languages\n if language != self.instance.language\n ]\n else:\n excluded_languages = [\n language.id for language in self.instance.region.languages\n ]\n\n # limit possible parents to nodes of current region\n self.fields[\"parent\"].queryset = parent_queryset\n self.fields[\"_ref_node_id\"].choices = self.fields[\"parent\"].choices\n # limit possible languages to those which are not yet included in the tree\n self.fields[\"language\"].queryset = Language.objects.exclude(\n id__in=excluded_languages\n )\n\n def clean(self):\n \"\"\"\n Validate form fields which depend on each other, see :meth:`django.forms.Form.clean`:\n Don't allow multiple root nodes for one region:\n If self is a root node and the region already has a default language, raise a\n :class:`~django.core.exceptions.ValidationError`.\n\n :return: The cleaned form data\n :rtype: dict\n \"\"\"\n cleaned_data = super().clean()\n default_language = self.instance.region.default_language\n # There are two cases in which this error is thrown.\n # Both cases include that the parent field is None.\n # 1. The instance does exist:\n # - The default language is different from the instance language\n # 2. The instance does not exist:\n # - The default language exists\n if not cleaned_data.get(\"parent\") and (\n (self.instance.id and default_language != self.instance.language)\n or (not self.instance.id and default_language)\n ):\n self.add_error(\n \"parent\",\n forms.ValidationError(\n _(\n \"This region has already a default language.\"\n \"Please specify a source language for this language.\"\n ),\n code=\"invalid\",\n ),\n )\n logger.debug(\n \"LanguageTreeNodeForm validated [2] with cleaned data %r\", cleaned_data\n )\n return cleaned_data\n\n def save(self, commit=True):\n \"\"\"\n This method extends the default ``save()``-method of the base :class:`~django.forms.ModelForm` to flush\n the cache after commiting.\n\n :param commit: Whether or not the changes should be written to the database\n :type commit: bool\n\n :return: The saved page translation object\n :rtype: ~integreat_cms.cms.models.pages.page_translation.PageTranslation\n \"\"\"\n # Save CustomModelForm and flush Cache\n result = super().save(commit=commit)\n\n for page in self.instance.region.pages.all():\n invalidate_obj(page)\n for poi in self.instance.region.pois.all():\n invalidate_obj(poi)\n for event in self.instance.region.events.all():\n invalidate_obj(event)\n return result\n", "path": "integreat_cms/cms/forms/language_tree/language_tree_node_form.py"}]} | 1,436 | 365 |
gh_patches_debug_11755 | rasdani/github-patches | git_diff | kserve__kserve-705 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
KFServing Transformer does not work with onnxruntime and trtis model server
/kind bug
**What steps did you take and what happened:**
Currently the KFServing transformer does not work with the onnxruntime and trtis model servers due to the following validation, because their data planes do not support the `instances` key in JSON.
https://github.com/kubeflow/kfserving/blob/master/python/kfserving/kfserving/handlers/http.py#L24
**What did you expect to happen:**
Before we get to the V2 data plane with an agreed protocol across model servers, we should relax this validation.
**Anything else you would like to add:**
[Miscellaneous information that will assist in solving the issue.]
**Environment:**
- Istio Version:
- Knative Version:
- KFServing Version: 0.2.2
- Kubeflow version:
- Minikube version:
- Kubernetes version: (use `kubectl version`):
- OS (e.g. from `/etc/os-release`):
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `python/kfserving/kfserving/handlers/http.py`
Content:
```
1 import tornado.web
2 import json
3 from typing import Dict
4 from http import HTTPStatus
5 from kfserving.kfmodel import KFModel
6
7
8 class HTTPHandler(tornado.web.RequestHandler):
9 def initialize(self, models: Dict[str, KFModel]):
10 self.models = models # pylint:disable=attribute-defined-outside-init
11
12 def get_model(self, name: str):
13 if name not in self.models:
14 raise tornado.web.HTTPError(
15 status_code=HTTPStatus.NOT_FOUND,
16 reason="Model with name %s does not exist." % name
17 )
18 model = self.models[name]
19 if not model.ready:
20 model.load()
21 return model
22
23 def validate(self, request):
24 if "instances" not in request:
25 raise tornado.web.HTTPError(
26 status_code=HTTPStatus.BAD_REQUEST,
27 reason="Expected key \"instances\" in request body"
28 )
29
30 if not isinstance(request["instances"], list):
31 raise tornado.web.HTTPError(
32 status_code=HTTPStatus.BAD_REQUEST,
33 reason="Expected \"instances\" to be a list"
34 )
35 return request
36
37
38 class PredictHandler(HTTPHandler):
39 def post(self, name: str):
40 model = self.get_model(name)
41 try:
42 body = json.loads(self.request.body)
43 except json.decoder.JSONDecodeError as e:
44 raise tornado.web.HTTPError(
45 status_code=HTTPStatus.BAD_REQUEST,
46 reason="Unrecognized request format: %s" % e
47 )
48 request = model.preprocess(body)
49 request = self.validate(request)
50 response = model.predict(request)
51 response = model.postprocess(response)
52 self.write(response)
53
54
55 class ExplainHandler(HTTPHandler):
56 def post(self, name: str):
57 model = self.get_model(name)
58 try:
59 body = json.loads(self.request.body)
60 except json.decoder.JSONDecodeError as e:
61 raise tornado.web.HTTPError(
62 status_code=HTTPStatus.BAD_REQUEST,
63 reason="Unrecognized request format: %s" % e
64 )
65 request = model.preprocess(body)
66 request = self.validate(request)
67 response = model.explain(request)
68 response = model.postprocess(response)
69 self.write(response)
70
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/python/kfserving/kfserving/handlers/http.py b/python/kfserving/kfserving/handlers/http.py
--- a/python/kfserving/kfserving/handlers/http.py
+++ b/python/kfserving/kfserving/handlers/http.py
@@ -21,13 +21,7 @@
return model
def validate(self, request):
- if "instances" not in request:
- raise tornado.web.HTTPError(
- status_code=HTTPStatus.BAD_REQUEST,
- reason="Expected key \"instances\" in request body"
- )
-
- if not isinstance(request["instances"], list):
+ if "instances" in request and not isinstance(request["instances"], list):
raise tornado.web.HTTPError(
status_code=HTTPStatus.BAD_REQUEST,
reason="Expected \"instances\" to be a list"
| {"golden_diff": "diff --git a/python/kfserving/kfserving/handlers/http.py b/python/kfserving/kfserving/handlers/http.py\n--- a/python/kfserving/kfserving/handlers/http.py\n+++ b/python/kfserving/kfserving/handlers/http.py\n@@ -21,13 +21,7 @@\n return model\n \n def validate(self, request):\n- if \"instances\" not in request:\n- raise tornado.web.HTTPError(\n- status_code=HTTPStatus.BAD_REQUEST,\n- reason=\"Expected key \\\"instances\\\" in request body\"\n- )\n-\n- if not isinstance(request[\"instances\"], list):\n+ if \"instances\" in request and not isinstance(request[\"instances\"], list):\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Expected \\\"instances\\\" to be a list\"\n", "issue": "KFServing Transformer does not work with onnxruntime and trtis model server\n/kind bug\r\n\r\n**What steps did you take and what happened:**\r\nCurrently kfserving transformer does not work with onnxruntime and trtis model server due to following validation because their data plane do not support `instances` key in JSON.\r\nhttps://github.com/kubeflow/kfserving/blob/master/python/kfserving/kfserving/handlers/http.py#L24 \r\n\r\n\r\n\r\n**What did you expect to happen:**\r\nBefore we get to V2 data plane with agreed protocol across model servers, we should relax this validation.\r\n\r\n**Anything else you would like to add:**\r\n[Miscellaneous information that will assist in solving the issue.]\r\n\r\n\r\n**Environment:**\r\n\r\n- Istio Version:\r\n- Knative Version:\r\n- KFServing Version: 0.2.2\r\n- Kubeflow version:\r\n- Minikube version:\r\n- Kubernetes version: (use `kubectl version`):\r\n- OS (e.g. from `/etc/os-release`):\r\n\n", "before_files": [{"content": "import tornado.web\nimport json\nfrom typing import Dict\nfrom http import HTTPStatus\nfrom kfserving.kfmodel import KFModel\n\n\nclass HTTPHandler(tornado.web.RequestHandler):\n def initialize(self, models: Dict[str, KFModel]):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get_model(self, name: str):\n if name not in self.models:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.NOT_FOUND,\n reason=\"Model with name %s does not exist.\" % name\n )\n model = self.models[name]\n if not model.ready:\n model.load()\n return model\n\n def validate(self, request):\n if \"instances\" not in request:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Expected key \\\"instances\\\" in request body\"\n )\n\n if not isinstance(request[\"instances\"], list):\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Expected \\\"instances\\\" to be a list\"\n )\n return request\n\n\nclass PredictHandler(HTTPHandler):\n def post(self, name: str):\n model = self.get_model(name)\n try:\n body = json.loads(self.request.body)\n except json.decoder.JSONDecodeError as e:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Unrecognized request format: %s\" % e\n )\n request = model.preprocess(body)\n request = self.validate(request)\n response = model.predict(request)\n response = model.postprocess(response)\n self.write(response)\n\n\nclass ExplainHandler(HTTPHandler):\n def post(self, name: str):\n model = self.get_model(name)\n try:\n body = json.loads(self.request.body)\n except json.decoder.JSONDecodeError as e:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Unrecognized request format: %s\" % e\n )\n request = model.preprocess(body)\n request = self.validate(request)\n 
response = model.explain(request)\n response = model.postprocess(response)\n self.write(response)\n", "path": "python/kfserving/kfserving/handlers/http.py"}], "after_files": [{"content": "import tornado.web\nimport json\nfrom typing import Dict\nfrom http import HTTPStatus\nfrom kfserving.kfmodel import KFModel\n\n\nclass HTTPHandler(tornado.web.RequestHandler):\n def initialize(self, models: Dict[str, KFModel]):\n self.models = models # pylint:disable=attribute-defined-outside-init\n\n def get_model(self, name: str):\n if name not in self.models:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.NOT_FOUND,\n reason=\"Model with name %s does not exist.\" % name\n )\n model = self.models[name]\n if not model.ready:\n model.load()\n return model\n\n def validate(self, request):\n if \"instances\" in request and not isinstance(request[\"instances\"], list):\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Expected \\\"instances\\\" to be a list\"\n )\n return request\n\n\nclass PredictHandler(HTTPHandler):\n def post(self, name: str):\n model = self.get_model(name)\n try:\n body = json.loads(self.request.body)\n except json.decoder.JSONDecodeError as e:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Unrecognized request format: %s\" % e\n )\n request = model.preprocess(body)\n request = self.validate(request)\n response = model.predict(request)\n response = model.postprocess(response)\n self.write(response)\n\n\nclass ExplainHandler(HTTPHandler):\n def post(self, name: str):\n model = self.get_model(name)\n try:\n body = json.loads(self.request.body)\n except json.decoder.JSONDecodeError as e:\n raise tornado.web.HTTPError(\n status_code=HTTPStatus.BAD_REQUEST,\n reason=\"Unrecognized request format: %s\" % e\n )\n request = model.preprocess(body)\n request = self.validate(request)\n response = model.explain(request)\n response = model.postprocess(response)\n self.write(response)\n", "path": "python/kfserving/kfserving/handlers/http.py"}]} | 1,079 | 185 |
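The relaxed check in the patch above is easy to see in isolation. Below is a standalone sketch of the validation rule, with `ValueError` standing in for tornado's `HTTPError` (an assumption so the snippet runs without a web server).

```python
# Sketch of the relaxed KFServing validation: "instances" is optional, but when
# present it must be a list, so trtis/onnxruntime-style payloads pass through.
def validate(request: dict) -> dict:
    if "instances" in request and not isinstance(request["instances"], list):
        raise ValueError('Expected "instances" to be a list')
    return request


if __name__ == "__main__":
    print(validate({"instances": [[1, 2, 3]]}))    # tensorflow-style payload
    print(validate({"inputs": {"x": [1, 2, 3]}}))  # no "instances" key, now accepted
    try:
        validate({"instances": "not-a-list"})
    except ValueError as err:
        print(err)                                 # still rejected
```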
gh_patches_debug_7329 | rasdani/github-patches | git_diff | dbt-labs__dbt-core-8842 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[CT-3213] [Bug] nitpick: incorrect docstring
It should be `BaseAdapter.capabilities()`, not `BaseAdapter.has_feature()`:
https://github.com/dbt-labs/dbt-core/blob/1baebb423c82a9c645e59b390fc3a69089623600/core/dbt/adapters/capability.py#L7
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `core/dbt/adapters/capability.py`
Content:
```
1 from dataclasses import dataclass
2 from enum import Enum
3 from typing import Optional, DefaultDict, Mapping
4
5
6 class Capability(str, Enum):
7 """Enumeration of optional adapter features which can be probed using BaseAdapter.has_feature()"""
8
9 SchemaMetadataByRelations = "SchemaMetadataByRelations"
10 """Indicates efficient support for retrieving schema metadata for a list of relations, rather than always retrieving
11 all the relations in a schema."""
12
13 TableLastModifiedMetadata = "TableLastModifiedMetadata"
14 """Indicates support for determining the time of the last table modification by querying database metadata."""
15
16
17 class Support(str, Enum):
18 Unknown = "Unknown"
19 """The adapter has not declared whether this capability is a feature of the underlying DBMS."""
20
21 Unsupported = "Unsupported"
22 """This capability is not possible with the underlying DBMS, so the adapter does not implement related macros."""
23
24 NotImplemented = "NotImplemented"
25 """This capability is available in the underlying DBMS, but support has not yet been implemented in the adapter."""
26
27 Versioned = "Versioned"
28 """Some versions of the DBMS supported by the adapter support this capability and the adapter has implemented any
29 macros needed to use it."""
30
31 Full = "Full"
32 """All versions of the DBMS supported by the adapter support this capability and the adapter has implemented any
33 macros needed to use it."""
34
35
36 @dataclass
37 class CapabilitySupport:
38 support: Support
39 first_version: Optional[str] = None
40
41 def __bool__(self):
42 return self.support == Support.Versioned or self.support == Support.Full
43
44
45 class CapabilityDict(DefaultDict[Capability, CapabilitySupport]):
46 def __init__(self, vals: Mapping[Capability, CapabilitySupport]):
47 super().__init__(self._default)
48 self.update(vals)
49
50 @staticmethod
51 def _default():
52 return CapabilitySupport(support=Support.Unknown)
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/core/dbt/adapters/capability.py b/core/dbt/adapters/capability.py
--- a/core/dbt/adapters/capability.py
+++ b/core/dbt/adapters/capability.py
@@ -4,7 +4,7 @@
class Capability(str, Enum):
- """Enumeration of optional adapter features which can be probed using BaseAdapter.has_feature()"""
+ """Enumeration of optional adapter features which can be probed using BaseAdapter.capabilities()"""
SchemaMetadataByRelations = "SchemaMetadataByRelations"
"""Indicates efficient support for retrieving schema metadata for a list of relations, rather than always retrieving
| {"golden_diff": "diff --git a/core/dbt/adapters/capability.py b/core/dbt/adapters/capability.py\n--- a/core/dbt/adapters/capability.py\n+++ b/core/dbt/adapters/capability.py\n@@ -4,7 +4,7 @@\n \n \n class Capability(str, Enum):\n- \"\"\"Enumeration of optional adapter features which can be probed using BaseAdapter.has_feature()\"\"\"\n+ \"\"\"Enumeration of optional adapter features which can be probed using BaseAdapter.capabilities()\"\"\"\n \n SchemaMetadataByRelations = \"SchemaMetadataByRelations\"\n \"\"\"Indicates efficient support for retrieving schema metadata for a list of relations, rather than always retrieving\n", "issue": "[CT-3213] [Bug] nitpick: incorrect docstring\nshould be `BaseAdapter.capabilities()` not `BaseAdapter.has_feature()`\r\n\r\nhttps://github.com/dbt-labs/dbt-core/blob/1baebb423c82a9c645e59b390fc3a69089623600/core/dbt/adapters/capability.py#L7\n", "before_files": [{"content": "from dataclasses import dataclass\nfrom enum import Enum\nfrom typing import Optional, DefaultDict, Mapping\n\n\nclass Capability(str, Enum):\n \"\"\"Enumeration of optional adapter features which can be probed using BaseAdapter.has_feature()\"\"\"\n\n SchemaMetadataByRelations = \"SchemaMetadataByRelations\"\n \"\"\"Indicates efficient support for retrieving schema metadata for a list of relations, rather than always retrieving\n all the relations in a schema.\"\"\"\n\n TableLastModifiedMetadata = \"TableLastModifiedMetadata\"\n \"\"\"Indicates support for determining the time of the last table modification by querying database metadata.\"\"\"\n\n\nclass Support(str, Enum):\n Unknown = \"Unknown\"\n \"\"\"The adapter has not declared whether this capability is a feature of the underlying DBMS.\"\"\"\n\n Unsupported = \"Unsupported\"\n \"\"\"This capability is not possible with the underlying DBMS, so the adapter does not implement related macros.\"\"\"\n\n NotImplemented = \"NotImplemented\"\n \"\"\"This capability is available in the underlying DBMS, but support has not yet been implemented in the adapter.\"\"\"\n\n Versioned = \"Versioned\"\n \"\"\"Some versions of the DBMS supported by the adapter support this capability and the adapter has implemented any\n macros needed to use it.\"\"\"\n\n Full = \"Full\"\n \"\"\"All versions of the DBMS supported by the adapter support this capability and the adapter has implemented any\n macros needed to use it.\"\"\"\n\n\n@dataclass\nclass CapabilitySupport:\n support: Support\n first_version: Optional[str] = None\n\n def __bool__(self):\n return self.support == Support.Versioned or self.support == Support.Full\n\n\nclass CapabilityDict(DefaultDict[Capability, CapabilitySupport]):\n def __init__(self, vals: Mapping[Capability, CapabilitySupport]):\n super().__init__(self._default)\n self.update(vals)\n\n @staticmethod\n def _default():\n return CapabilitySupport(support=Support.Unknown)\n", "path": "core/dbt/adapters/capability.py"}], "after_files": [{"content": "from dataclasses import dataclass\nfrom enum import Enum\nfrom typing import Optional, DefaultDict, Mapping\n\n\nclass Capability(str, Enum):\n \"\"\"Enumeration of optional adapter features which can be probed using BaseAdapter.capabilities()\"\"\"\n\n SchemaMetadataByRelations = \"SchemaMetadataByRelations\"\n \"\"\"Indicates efficient support for retrieving schema metadata for a list of relations, rather than always retrieving\n all the relations in a schema.\"\"\"\n\n TableLastModifiedMetadata = \"TableLastModifiedMetadata\"\n \"\"\"Indicates support for determining the time of 
the last table modification by querying database metadata.\"\"\"\n\n\nclass Support(str, Enum):\n Unknown = \"Unknown\"\n \"\"\"The adapter has not declared whether this capability is a feature of the underlying DBMS.\"\"\"\n\n Unsupported = \"Unsupported\"\n \"\"\"This capability is not possible with the underlying DBMS, so the adapter does not implement related macros.\"\"\"\n\n NotImplemented = \"NotImplemented\"\n \"\"\"This capability is available in the underlying DBMS, but support has not yet been implemented in the adapter.\"\"\"\n\n Versioned = \"Versioned\"\n \"\"\"Some versions of the DBMS supported by the adapter support this capability and the adapter has implemented any\n macros needed to use it.\"\"\"\n\n Full = \"Full\"\n \"\"\"All versions of the DBMS supported by the adapter support this capability and the adapter has implemented any\n macros needed to use it.\"\"\"\n\n\n@dataclass\nclass CapabilitySupport:\n support: Support\n first_version: Optional[str] = None\n\n def __bool__(self):\n return self.support == Support.Versioned or self.support == Support.Full\n\n\nclass CapabilityDict(DefaultDict[Capability, CapabilitySupport]):\n def __init__(self, vals: Mapping[Capability, CapabilitySupport]):\n super().__init__(self._default)\n self.update(vals)\n\n @staticmethod\n def _default():\n return CapabilitySupport(support=Support.Unknown)\n", "path": "core/dbt/adapters/capability.py"}]} | 857 | 139 |
gh_patches_debug_47832 | rasdani/github-patches | git_diff | getredash__redash-5354 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Minor Salesforce runner fix
<!--
We use GitHub only for bug reports 🐛
Anything else should be posted to https://discuss.redash.io 👫
🚨For support, help & questions use https://discuss.redash.io/c/support
💡For feature requests & ideas use https://discuss.redash.io/c/feature-requests
**Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key.
-->
### Issue Summary
A Security Token isn't required in all SFDC environments - depending on configuration. See [here](https://help.salesforce.com/articleView?id=000331668&type=1&mode=1) for more information.
### Steps to Reproduce
1. Add Salesforce as a data source where a token isn't required (and cannot be generated)
2. Cannot proceed without required field
### Technical details:
https://github.com/getredash/redash/blob/be56035bd6d9856361edc6b23d30a38c8f2d2be2/redash/query_runner/salesforce.py#L81
Just remove `token` from the `required` list. Seemed like it'd be faster to create an issue than submit a PR for such a small change
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redash/query_runner/salesforce.py`
Content:
```
1 import logging
2 import re
3 from collections import OrderedDict
4
5 from redash.query_runner import (
6 TYPE_BOOLEAN,
7 TYPE_DATE,
8 TYPE_DATETIME,
9 TYPE_FLOAT,
10 TYPE_INTEGER,
11 TYPE_STRING,
12 BaseQueryRunner,
13 register,
14 )
15 from redash.utils import json_dumps
16
17 logger = logging.getLogger(__name__)
18
19 try:
20 from simple_salesforce import Salesforce as SimpleSalesforce
21 from simple_salesforce import SalesforceError
22 from simple_salesforce.api import DEFAULT_API_VERSION
23
24 enabled = True
25 except ImportError:
26 enabled = False
27
28 # See https://developer.salesforce.com/docs/atlas.en-us.api.meta/api/field_types.htm
29 TYPES_MAP = dict(
30 id=TYPE_STRING,
31 string=TYPE_STRING,
32 currency=TYPE_FLOAT,
33 reference=TYPE_STRING,
34 double=TYPE_FLOAT,
35 picklist=TYPE_STRING,
36 date=TYPE_DATE,
37 url=TYPE_STRING,
38 phone=TYPE_STRING,
39 textarea=TYPE_STRING,
40 int=TYPE_INTEGER,
41 datetime=TYPE_DATETIME,
42 boolean=TYPE_BOOLEAN,
43 percent=TYPE_FLOAT,
44 multipicklist=TYPE_STRING,
45 masterrecord=TYPE_STRING,
46 location=TYPE_STRING,
47 JunctionIdList=TYPE_STRING,
48 encryptedstring=TYPE_STRING,
49 email=TYPE_STRING,
50 DataCategoryGroupReference=TYPE_STRING,
51 combobox=TYPE_STRING,
52 calculated=TYPE_STRING,
53 anyType=TYPE_STRING,
54 address=TYPE_STRING,
55 )
56
57 # Query Runner for Salesforce SOQL Queries
58 # For example queries, see:
59 # https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_examples.htm
60
61
62 class Salesforce(BaseQueryRunner):
63 should_annotate_query = False
64
65 @classmethod
66 def enabled(cls):
67 return enabled
68
69 @classmethod
70 def configuration_schema(cls):
71 return {
72 "type": "object",
73 "properties": {
74 "username": {"type": "string"},
75 "password": {"type": "string"},
76 "token": {"type": "string", "title": "Security Token"},
77 "sandbox": {"type": "boolean"},
78 "api_version": {
79 "type": "string",
80 "title": "Salesforce API Version",
81 "default": DEFAULT_API_VERSION,
82 },
83 },
84 "required": ["username", "password", "token"],
85 "secret": ["password", "token"],
86 }
87
88 def test_connection(self):
89 response = self._get_sf().describe()
90 if response is None:
91 raise Exception("Failed describing objects.")
92 pass
93
94 def _get_sf(self):
95 sf = SimpleSalesforce(
96 username=self.configuration["username"],
97 password=self.configuration["password"],
98 security_token=self.configuration["token"],
99 sandbox=self.configuration.get("sandbox", False),
100 version=self.configuration.get("api_version", DEFAULT_API_VERSION),
101 client_id="Redash",
102 )
103 return sf
104
105 def _clean_value(self, value):
106 if isinstance(value, OrderedDict) and "records" in value:
107 value = value["records"]
108 for row in value:
109 row.pop("attributes", None)
110 return value
111
112 def _get_value(self, dct, dots):
113 for key in dots.split("."):
114 if dct is not None and key in dct:
115 dct = dct.get(key)
116 else:
117 dct = None
118 return dct
119
120 def _get_column_name(self, key, parents=[]):
121 return ".".join(parents + [key])
122
123 def _build_columns(self, sf, child, parents=[]):
124 child_type = child["attributes"]["type"]
125 child_desc = sf.__getattr__(child_type).describe()
126 child_type_map = dict((f["name"], f["type"]) for f in child_desc["fields"])
127 columns = []
128 for key in child.keys():
129 if key != "attributes":
130 if isinstance(child[key], OrderedDict) and "attributes" in child[key]:
131 columns.extend(self._build_columns(sf, child[key], parents + [key]))
132 else:
133 column_name = self._get_column_name(key, parents)
134 key_type = child_type_map.get(key, "string")
135 column_type = TYPES_MAP.get(key_type, TYPE_STRING)
136 columns.append((column_name, column_type))
137 return columns
138
139 def _build_rows(self, columns, records):
140 rows = []
141 for record in records:
142 record.pop("attributes", None)
143 row = dict()
144 for column in columns:
145 key = column[0]
146 value = self._get_value(record, key)
147 row[key] = self._clean_value(value)
148 rows.append(row)
149 return rows
150
151 def run_query(self, query, user):
152 logger.debug("Salesforce is about to execute query: %s", query)
153 query = re.sub(r"/\*(.|\n)*?\*/", "", query).strip()
154 try:
155 columns = []
156 rows = []
157 sf = self._get_sf()
158 response = sf.query_all(query)
159 records = response["records"]
160 if response["totalSize"] > 0 and len(records) == 0:
161 columns = self.fetch_columns([("Count", TYPE_INTEGER)])
162 rows = [{"Count": response["totalSize"]}]
163 elif len(records) > 0:
164 cols = self._build_columns(sf, records[0])
165 rows = self._build_rows(cols, records)
166 columns = self.fetch_columns(cols)
167 error = None
168 data = {"columns": columns, "rows": rows}
169 json_data = json_dumps(data)
170 except SalesforceError as err:
171 error = err.content
172 json_data = None
173 return json_data, error
174
175 def get_schema(self, get_stats=False):
176 sf = self._get_sf()
177 response = sf.describe()
178 if response is None:
179 raise Exception("Failed describing objects.")
180
181 schema = {}
182 for sobject in response["sobjects"]:
183 table_name = sobject["name"]
184 if sobject["queryable"] is True and table_name not in schema:
185 desc = sf.__getattr__(sobject["name"]).describe()
186 fields = desc["fields"]
187 schema[table_name] = {
188 "name": table_name,
189 "columns": [f["name"] for f in fields],
190 }
191 return list(schema.values())
192
193
194 register(Salesforce)
195
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/redash/query_runner/salesforce.py b/redash/query_runner/salesforce.py
--- a/redash/query_runner/salesforce.py
+++ b/redash/query_runner/salesforce.py
@@ -81,7 +81,7 @@
"default": DEFAULT_API_VERSION,
},
},
- "required": ["username", "password", "token"],
+ "required": ["username", "password"],
"secret": ["password", "token"],
}
| {"golden_diff": "diff --git a/redash/query_runner/salesforce.py b/redash/query_runner/salesforce.py\n--- a/redash/query_runner/salesforce.py\n+++ b/redash/query_runner/salesforce.py\n@@ -81,7 +81,7 @@\n \"default\": DEFAULT_API_VERSION,\n },\n },\n- \"required\": [\"username\", \"password\", \"token\"],\n+ \"required\": [\"username\", \"password\"],\n \"secret\": [\"password\", \"token\"],\n }\n", "issue": "Minor Salesforce runner fix\n<!--\r\n\r\nWe use GitHub only for bug reports \ud83d\udc1b\r\n\r\nAnything else should be posted to https://discuss.redash.io \ud83d\udc6b\r\n\r\n\ud83d\udea8For support, help & questions use https://discuss.redash.io/c/support\r\n\ud83d\udca1For feature requests & ideas use https://discuss.redash.io/c/feature-requests\r\n\r\n**Found a security vulnerability?** Please email [email protected] to report any security vulnerabilities. We will acknowledge receipt of your vulnerability and strive to send you regular updates about our progress. If you're curious about the status of your disclosure please feel free to email us again. If you want to encrypt your disclosure email, you can use this PGP key.\r\n\r\n-->\r\n\r\n### Issue Summary\r\n\r\nA Security Token isn't required in all SFDC environments - depending on configuration. See [here](https://help.salesforce.com/articleView?id=000331668&type=1&mode=1) for more information.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Add Salesforce as a data source where a token isn't required (and cannot be generated)\r\n2. Cannot proceed without required field\r\n\r\n### Technical details:\r\n\r\nhttps://github.com/getredash/redash/blob/be56035bd6d9856361edc6b23d30a38c8f2d2be2/redash/query_runner/salesforce.py#L81\r\n\r\nJust remove `token` from the `required` list. Seemed like it'd be faster to create an issue than submit a PR for such a small change\n", "before_files": [{"content": "import logging\nimport re\nfrom collections import OrderedDict\n\nfrom redash.query_runner import (\n TYPE_BOOLEAN,\n TYPE_DATE,\n TYPE_DATETIME,\n TYPE_FLOAT,\n TYPE_INTEGER,\n TYPE_STRING,\n BaseQueryRunner,\n register,\n)\nfrom redash.utils import json_dumps\n\nlogger = logging.getLogger(__name__)\n\ntry:\n from simple_salesforce import Salesforce as SimpleSalesforce\n from simple_salesforce import SalesforceError\n from simple_salesforce.api import DEFAULT_API_VERSION\n\n enabled = True\nexcept ImportError:\n enabled = False\n\n# See https://developer.salesforce.com/docs/atlas.en-us.api.meta/api/field_types.htm\nTYPES_MAP = dict(\n id=TYPE_STRING,\n string=TYPE_STRING,\n currency=TYPE_FLOAT,\n reference=TYPE_STRING,\n double=TYPE_FLOAT,\n picklist=TYPE_STRING,\n date=TYPE_DATE,\n url=TYPE_STRING,\n phone=TYPE_STRING,\n textarea=TYPE_STRING,\n int=TYPE_INTEGER,\n datetime=TYPE_DATETIME,\n boolean=TYPE_BOOLEAN,\n percent=TYPE_FLOAT,\n multipicklist=TYPE_STRING,\n masterrecord=TYPE_STRING,\n location=TYPE_STRING,\n JunctionIdList=TYPE_STRING,\n encryptedstring=TYPE_STRING,\n email=TYPE_STRING,\n DataCategoryGroupReference=TYPE_STRING,\n combobox=TYPE_STRING,\n calculated=TYPE_STRING,\n anyType=TYPE_STRING,\n address=TYPE_STRING,\n)\n\n# Query Runner for Salesforce SOQL Queries\n# For example queries, see:\n# https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_examples.htm\n\n\nclass Salesforce(BaseQueryRunner):\n should_annotate_query = False\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n 
\"properties\": {\n \"username\": {\"type\": \"string\"},\n \"password\": {\"type\": \"string\"},\n \"token\": {\"type\": \"string\", \"title\": \"Security Token\"},\n \"sandbox\": {\"type\": \"boolean\"},\n \"api_version\": {\n \"type\": \"string\",\n \"title\": \"Salesforce API Version\",\n \"default\": DEFAULT_API_VERSION,\n },\n },\n \"required\": [\"username\", \"password\", \"token\"],\n \"secret\": [\"password\", \"token\"],\n }\n\n def test_connection(self):\n response = self._get_sf().describe()\n if response is None:\n raise Exception(\"Failed describing objects.\")\n pass\n\n def _get_sf(self):\n sf = SimpleSalesforce(\n username=self.configuration[\"username\"],\n password=self.configuration[\"password\"],\n security_token=self.configuration[\"token\"],\n sandbox=self.configuration.get(\"sandbox\", False),\n version=self.configuration.get(\"api_version\", DEFAULT_API_VERSION),\n client_id=\"Redash\",\n )\n return sf\n\n def _clean_value(self, value):\n if isinstance(value, OrderedDict) and \"records\" in value:\n value = value[\"records\"]\n for row in value:\n row.pop(\"attributes\", None)\n return value\n\n def _get_value(self, dct, dots):\n for key in dots.split(\".\"):\n if dct is not None and key in dct:\n dct = dct.get(key)\n else:\n dct = None\n return dct\n\n def _get_column_name(self, key, parents=[]):\n return \".\".join(parents + [key])\n\n def _build_columns(self, sf, child, parents=[]):\n child_type = child[\"attributes\"][\"type\"]\n child_desc = sf.__getattr__(child_type).describe()\n child_type_map = dict((f[\"name\"], f[\"type\"]) for f in child_desc[\"fields\"])\n columns = []\n for key in child.keys():\n if key != \"attributes\":\n if isinstance(child[key], OrderedDict) and \"attributes\" in child[key]:\n columns.extend(self._build_columns(sf, child[key], parents + [key]))\n else:\n column_name = self._get_column_name(key, parents)\n key_type = child_type_map.get(key, \"string\")\n column_type = TYPES_MAP.get(key_type, TYPE_STRING)\n columns.append((column_name, column_type))\n return columns\n\n def _build_rows(self, columns, records):\n rows = []\n for record in records:\n record.pop(\"attributes\", None)\n row = dict()\n for column in columns:\n key = column[0]\n value = self._get_value(record, key)\n row[key] = self._clean_value(value)\n rows.append(row)\n return rows\n\n def run_query(self, query, user):\n logger.debug(\"Salesforce is about to execute query: %s\", query)\n query = re.sub(r\"/\\*(.|\\n)*?\\*/\", \"\", query).strip()\n try:\n columns = []\n rows = []\n sf = self._get_sf()\n response = sf.query_all(query)\n records = response[\"records\"]\n if response[\"totalSize\"] > 0 and len(records) == 0:\n columns = self.fetch_columns([(\"Count\", TYPE_INTEGER)])\n rows = [{\"Count\": response[\"totalSize\"]}]\n elif len(records) > 0:\n cols = self._build_columns(sf, records[0])\n rows = self._build_rows(cols, records)\n columns = self.fetch_columns(cols)\n error = None\n data = {\"columns\": columns, \"rows\": rows}\n json_data = json_dumps(data)\n except SalesforceError as err:\n error = err.content\n json_data = None\n return json_data, error\n\n def get_schema(self, get_stats=False):\n sf = self._get_sf()\n response = sf.describe()\n if response is None:\n raise Exception(\"Failed describing objects.\")\n\n schema = {}\n for sobject in response[\"sobjects\"]:\n table_name = sobject[\"name\"]\n if sobject[\"queryable\"] is True and table_name not in schema:\n desc = sf.__getattr__(sobject[\"name\"]).describe()\n fields = desc[\"fields\"]\n 
schema[table_name] = {\n \"name\": table_name,\n \"columns\": [f[\"name\"] for f in fields],\n }\n return list(schema.values())\n\n\nregister(Salesforce)\n", "path": "redash/query_runner/salesforce.py"}], "after_files": [{"content": "import logging\nimport re\nfrom collections import OrderedDict\n\nfrom redash.query_runner import (\n TYPE_BOOLEAN,\n TYPE_DATE,\n TYPE_DATETIME,\n TYPE_FLOAT,\n TYPE_INTEGER,\n TYPE_STRING,\n BaseQueryRunner,\n register,\n)\nfrom redash.utils import json_dumps\n\nlogger = logging.getLogger(__name__)\n\ntry:\n from simple_salesforce import Salesforce as SimpleSalesforce\n from simple_salesforce import SalesforceError\n from simple_salesforce.api import DEFAULT_API_VERSION\n\n enabled = True\nexcept ImportError:\n enabled = False\n\n# See https://developer.salesforce.com/docs/atlas.en-us.api.meta/api/field_types.htm\nTYPES_MAP = dict(\n id=TYPE_STRING,\n string=TYPE_STRING,\n currency=TYPE_FLOAT,\n reference=TYPE_STRING,\n double=TYPE_FLOAT,\n picklist=TYPE_STRING,\n date=TYPE_DATE,\n url=TYPE_STRING,\n phone=TYPE_STRING,\n textarea=TYPE_STRING,\n int=TYPE_INTEGER,\n datetime=TYPE_DATETIME,\n boolean=TYPE_BOOLEAN,\n percent=TYPE_FLOAT,\n multipicklist=TYPE_STRING,\n masterrecord=TYPE_STRING,\n location=TYPE_STRING,\n JunctionIdList=TYPE_STRING,\n encryptedstring=TYPE_STRING,\n email=TYPE_STRING,\n DataCategoryGroupReference=TYPE_STRING,\n combobox=TYPE_STRING,\n calculated=TYPE_STRING,\n anyType=TYPE_STRING,\n address=TYPE_STRING,\n)\n\n# Query Runner for Salesforce SOQL Queries\n# For example queries, see:\n# https://developer.salesforce.com/docs/atlas.en-us.soql_sosl.meta/soql_sosl/sforce_api_calls_soql_select_examples.htm\n\n\nclass Salesforce(BaseQueryRunner):\n should_annotate_query = False\n\n @classmethod\n def enabled(cls):\n return enabled\n\n @classmethod\n def configuration_schema(cls):\n return {\n \"type\": \"object\",\n \"properties\": {\n \"username\": {\"type\": \"string\"},\n \"password\": {\"type\": \"string\"},\n \"token\": {\"type\": \"string\", \"title\": \"Security Token\"},\n \"sandbox\": {\"type\": \"boolean\"},\n \"api_version\": {\n \"type\": \"string\",\n \"title\": \"Salesforce API Version\",\n \"default\": DEFAULT_API_VERSION,\n },\n },\n \"required\": [\"username\", \"password\"],\n \"secret\": [\"password\", \"token\"],\n }\n\n def test_connection(self):\n response = self._get_sf().describe()\n if response is None:\n raise Exception(\"Failed describing objects.\")\n pass\n\n def _get_sf(self):\n sf = SimpleSalesforce(\n username=self.configuration[\"username\"],\n password=self.configuration[\"password\"],\n security_token=self.configuration[\"token\"],\n sandbox=self.configuration.get(\"sandbox\", False),\n version=self.configuration.get(\"api_version\", DEFAULT_API_VERSION),\n client_id=\"Redash\",\n )\n return sf\n\n def _clean_value(self, value):\n if isinstance(value, OrderedDict) and \"records\" in value:\n value = value[\"records\"]\n for row in value:\n row.pop(\"attributes\", None)\n return value\n\n def _get_value(self, dct, dots):\n for key in dots.split(\".\"):\n if dct is not None and key in dct:\n dct = dct.get(key)\n else:\n dct = None\n return dct\n\n def _get_column_name(self, key, parents=[]):\n return \".\".join(parents + [key])\n\n def _build_columns(self, sf, child, parents=[]):\n child_type = child[\"attributes\"][\"type\"]\n child_desc = sf.__getattr__(child_type).describe()\n child_type_map = dict((f[\"name\"], f[\"type\"]) for f in child_desc[\"fields\"])\n columns = []\n for key in child.keys():\n if key 
!= \"attributes\":\n if isinstance(child[key], OrderedDict) and \"attributes\" in child[key]:\n columns.extend(self._build_columns(sf, child[key], parents + [key]))\n else:\n column_name = self._get_column_name(key, parents)\n key_type = child_type_map.get(key, \"string\")\n column_type = TYPES_MAP.get(key_type, TYPE_STRING)\n columns.append((column_name, column_type))\n return columns\n\n def _build_rows(self, columns, records):\n rows = []\n for record in records:\n record.pop(\"attributes\", None)\n row = dict()\n for column in columns:\n key = column[0]\n value = self._get_value(record, key)\n row[key] = self._clean_value(value)\n rows.append(row)\n return rows\n\n def run_query(self, query, user):\n logger.debug(\"Salesforce is about to execute query: %s\", query)\n query = re.sub(r\"/\\*(.|\\n)*?\\*/\", \"\", query).strip()\n try:\n columns = []\n rows = []\n sf = self._get_sf()\n response = sf.query_all(query)\n records = response[\"records\"]\n if response[\"totalSize\"] > 0 and len(records) == 0:\n columns = self.fetch_columns([(\"Count\", TYPE_INTEGER)])\n rows = [{\"Count\": response[\"totalSize\"]}]\n elif len(records) > 0:\n cols = self._build_columns(sf, records[0])\n rows = self._build_rows(cols, records)\n columns = self.fetch_columns(cols)\n error = None\n data = {\"columns\": columns, \"rows\": rows}\n json_data = json_dumps(data)\n except SalesforceError as err:\n error = err.content\n json_data = None\n return json_data, error\n\n def get_schema(self, get_stats=False):\n sf = self._get_sf()\n response = sf.describe()\n if response is None:\n raise Exception(\"Failed describing objects.\")\n\n schema = {}\n for sobject in response[\"sobjects\"]:\n table_name = sobject[\"name\"]\n if sobject[\"queryable\"] is True and table_name not in schema:\n desc = sf.__getattr__(sobject[\"name\"]).describe()\n fields = desc[\"fields\"]\n schema[table_name] = {\n \"name\": table_name,\n \"columns\": [f[\"name\"] for f in fields],\n }\n return list(schema.values())\n\n\nregister(Salesforce)\n", "path": "redash/query_runner/salesforce.py"}]} | 2,435 | 105 |
gh_patches_debug_10958 | rasdani/github-patches | git_diff | python-telegram-bot__python-telegram-bot-1760 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] v12.4 breaks PicklePersistence
<!--
Thanks for reporting issues of python-telegram-bot!
Use this template to notify us if you found a bug.
To make it easier for us to help you please enter detailed information below.
Please note, we only support the latest version of python-telegram-bot and
master branch. Please make sure to upgrade & recreate the issue on the latest
version prior to opening an issue.
-->
### Steps to reproduce
1. Have a bot using PicklePersistence with singlefile=True
2. Upgrade to v12.4
3. restart bot
### Expected behaviour
pickled file is read correctly
### Actual behaviour
key error `bot_data` is thrown
### Current workaround:
Add an empty dict `bot_data` to the file manually. Quick and dirty script:
```
import pickle
filename = 'my_pickle_persistence_file'
with (open(filename, 'rb')) as file:
data = pickle.load(file)
data['bot_data'] = {}
with open(filename, 'wb') as f:
pickle.dump(data, f)
```
Will be closed by #1760
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `telegram/ext/picklepersistence.py`
Content:
```
1 #!/usr/bin/env python
2 #
3 # A library that provides a Python interface to the Telegram Bot API
4 # Copyright (C) 2015-2020
5 # Leandro Toledo de Souza <[email protected]>
6 #
7 # This program is free software: you can redistribute it and/or modify
8 # it under the terms of the GNU Lesser Public License as published by
9 # the Free Software Foundation, either version 3 of the License, or
10 # (at your option) any later version.
11 #
12 # This program is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU Lesser Public License for more details.
16 #
17 # You should have received a copy of the GNU Lesser Public License
18 # along with this program. If not, see [http://www.gnu.org/licenses/].
19 """This module contains the PicklePersistence class."""
20 import pickle
21 from collections import defaultdict
22 from copy import deepcopy
23
24 from telegram.ext import BasePersistence
25
26
27 class PicklePersistence(BasePersistence):
28 """Using python's builtin pickle for making you bot persistent.
29
30 Attributes:
31 filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`
32 is false this will be used as a prefix.
33 store_user_data (:obj:`bool`): Optional. Whether user_data should be saved by this
34 persistence class.
35 store_chat_data (:obj:`bool`): Optional. Whether user_data should be saved by this
36 persistence class.
37 store_bot_data (:obj:`bool`): Optional. Whether bot_data should be saved by this
38 persistence class.
39 single_file (:obj:`bool`): Optional. When ``False`` will store 3 sperate files of
40 `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is
41 ``True``.
42 on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`
43 is called and keep data in memory until that happens. When ``False`` will store data
44 on any transaction *and* on call fo :meth:`flush`. Default is ``False``.
45
46 Args:
47 filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`
48 is false this will be used as a prefix.
49 store_user_data (:obj:`bool`, optional): Whether user_data should be saved by this
50 persistence class. Default is ``True``.
51 store_chat_data (:obj:`bool`, optional): Whether user_data should be saved by this
52 persistence class. Default is ``True``.
53 store_bot_data (:obj:`bool`, optional): Whether bot_data should be saved by this
54 persistence class. Default is ``True`` .
55 single_file (:obj:`bool`, optional): When ``False`` will store 3 sperate files of
56 `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is
57 ``True``.
58 on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`
59 is called and keep data in memory until that happens. When ``False`` will store data
60 on any transaction *and* on call fo :meth:`flush`. Default is ``False``.
61 """
62
63 def __init__(self, filename,
64 store_user_data=True,
65 store_chat_data=True,
66 store_bot_data=True,
67 single_file=True,
68 on_flush=False):
69 super(PicklePersistence, self).__init__(store_user_data=store_user_data,
70 store_chat_data=store_chat_data,
71 store_bot_data=store_bot_data)
72 self.filename = filename
73 self.single_file = single_file
74 self.on_flush = on_flush
75 self.user_data = None
76 self.chat_data = None
77 self.bot_data = None
78 self.conversations = None
79
80 def load_singlefile(self):
81 try:
82 filename = self.filename
83 with open(self.filename, "rb") as f:
84 all = pickle.load(f)
85 self.user_data = defaultdict(dict, all['user_data'])
86 self.chat_data = defaultdict(dict, all['chat_data'])
87 self.bot_data = all['bot_data']
88 self.conversations = all['conversations']
89 except IOError:
90 self.conversations = {}
91 self.user_data = defaultdict(dict)
92 self.chat_data = defaultdict(dict)
93 self.bot_data = {}
94 except pickle.UnpicklingError:
95 raise TypeError("File {} does not contain valid pickle data".format(filename))
96 except Exception:
97 raise TypeError("Something went wrong unpickling {}".format(filename))
98
99 def load_file(self, filename):
100 try:
101 with open(filename, "rb") as f:
102 return pickle.load(f)
103 except IOError:
104 return None
105 except pickle.UnpicklingError:
106 raise TypeError("File {} does not contain valid pickle data".format(filename))
107 except Exception:
108 raise TypeError("Something went wrong unpickling {}".format(filename))
109
110 def dump_singlefile(self):
111 with open(self.filename, "wb") as f:
112 all = {'conversations': self.conversations, 'user_data': self.user_data,
113 'chat_data': self.chat_data, 'bot_data': self.bot_data}
114 pickle.dump(all, f)
115
116 def dump_file(self, filename, data):
117 with open(filename, "wb") as f:
118 pickle.dump(data, f)
119
120 def get_user_data(self):
121 """Returns the user_data from the pickle file if it exsists or an empty defaultdict.
122
123 Returns:
124 :obj:`defaultdict`: The restored user data.
125 """
126 if self.user_data:
127 pass
128 elif not self.single_file:
129 filename = "{}_user_data".format(self.filename)
130 data = self.load_file(filename)
131 if not data:
132 data = defaultdict(dict)
133 else:
134 data = defaultdict(dict, data)
135 self.user_data = data
136 else:
137 self.load_singlefile()
138 return deepcopy(self.user_data)
139
140 def get_chat_data(self):
141 """Returns the chat_data from the pickle file if it exsists or an empty defaultdict.
142
143 Returns:
144 :obj:`defaultdict`: The restored chat data.
145 """
146 if self.chat_data:
147 pass
148 elif not self.single_file:
149 filename = "{}_chat_data".format(self.filename)
150 data = self.load_file(filename)
151 if not data:
152 data = defaultdict(dict)
153 else:
154 data = defaultdict(dict, data)
155 self.chat_data = data
156 else:
157 self.load_singlefile()
158 return deepcopy(self.chat_data)
159
160 def get_bot_data(self):
161 """Returns the bot_data from the pickle file if it exsists or an empty dict.
162
163 Returns:
164 :obj:`defaultdict`: The restored bot data.
165 """
166 if self.bot_data:
167 pass
168 elif not self.single_file:
169 filename = "{}_bot_data".format(self.filename)
170 data = self.load_file(filename)
171 if not data:
172 data = {}
173 self.bot_data = data
174 else:
175 self.load_singlefile()
176 return deepcopy(self.bot_data)
177
178 def get_conversations(self, name):
179 """Returns the conversations from the pickle file if it exsists or an empty defaultdict.
180
181 Args:
182 name (:obj:`str`): The handlers name.
183
184 Returns:
185 :obj:`dict`: The restored conversations for the handler.
186 """
187 if self.conversations:
188 pass
189 elif not self.single_file:
190 filename = "{}_conversations".format(self.filename)
191 data = self.load_file(filename)
192 if not data:
193 data = {name: {}}
194 self.conversations = data
195 else:
196 self.load_singlefile()
197 return self.conversations.get(name, {}).copy()
198
199 def update_conversation(self, name, key, new_state):
200 """Will update the conversations for the given handler and depending on :attr:`on_flush`
201 save the pickle file.
202
203 Args:
204 name (:obj:`str`): The handlers name.
205 key (:obj:`tuple`): The key the state is changed for.
206 new_state (:obj:`tuple` | :obj:`any`): The new state for the given key.
207 """
208 if self.conversations.setdefault(name, {}).get(key) == new_state:
209 return
210 self.conversations[name][key] = new_state
211 if not self.on_flush:
212 if not self.single_file:
213 filename = "{}_conversations".format(self.filename)
214 self.dump_file(filename, self.conversations)
215 else:
216 self.dump_singlefile()
217
218 def update_user_data(self, user_id, data):
219 """Will update the user_data (if changed) and depending on :attr:`on_flush` save the
220 pickle file.
221
222 Args:
223 user_id (:obj:`int`): The user the data might have been changed for.
224 data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.user_data` [user_id].
225 """
226 if self.user_data.get(user_id) == data:
227 return
228 self.user_data[user_id] = data
229 if not self.on_flush:
230 if not self.single_file:
231 filename = "{}_user_data".format(self.filename)
232 self.dump_file(filename, self.user_data)
233 else:
234 self.dump_singlefile()
235
236 def update_chat_data(self, chat_id, data):
237 """Will update the chat_data (if changed) and depending on :attr:`on_flush` save the
238 pickle file.
239
240 Args:
241 chat_id (:obj:`int`): The chat the data might have been changed for.
242 data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.chat_data` [chat_id].
243 """
244 if self.chat_data.get(chat_id) == data:
245 return
246 self.chat_data[chat_id] = data
247 if not self.on_flush:
248 if not self.single_file:
249 filename = "{}_chat_data".format(self.filename)
250 self.dump_file(filename, self.chat_data)
251 else:
252 self.dump_singlefile()
253
254 def update_bot_data(self, data):
255 """Will update the bot_data (if changed) and depending on :attr:`on_flush` save the
256 pickle file.
257
258 Args:
259 data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.bot_data`.
260 """
261 if self.bot_data == data:
262 return
263 self.bot_data = data.copy()
264 if not self.on_flush:
265 if not self.single_file:
266 filename = "{}_bot_data".format(self.filename)
267 self.dump_file(filename, self.bot_data)
268 else:
269 self.dump_singlefile()
270
271 def flush(self):
272 """ Will save all data in memory to pickle file(s).
273 """
274 if self.single_file:
275 if self.user_data or self.chat_data or self.conversations:
276 self.dump_singlefile()
277 else:
278 if self.user_data:
279 self.dump_file("{}_user_data".format(self.filename), self.user_data)
280 if self.chat_data:
281 self.dump_file("{}_chat_data".format(self.filename), self.chat_data)
282 if self.bot_data:
283 self.dump_file("{}_bot_data".format(self.filename), self.bot_data)
284 if self.conversations:
285 self.dump_file("{}_conversations".format(self.filename), self.conversations)
286
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/telegram/ext/picklepersistence.py b/telegram/ext/picklepersistence.py
--- a/telegram/ext/picklepersistence.py
+++ b/telegram/ext/picklepersistence.py
@@ -84,7 +84,8 @@
all = pickle.load(f)
self.user_data = defaultdict(dict, all['user_data'])
self.chat_data = defaultdict(dict, all['chat_data'])
- self.bot_data = all['bot_data']
+ # For backwards compatibility with files not containing bot data
+ self.bot_data = all.get('bot_data', {})
self.conversations = all['conversations']
except IOError:
self.conversations = {}
| {"golden_diff": "diff --git a/telegram/ext/picklepersistence.py b/telegram/ext/picklepersistence.py\n--- a/telegram/ext/picklepersistence.py\n+++ b/telegram/ext/picklepersistence.py\n@@ -84,7 +84,8 @@\n all = pickle.load(f)\n self.user_data = defaultdict(dict, all['user_data'])\n self.chat_data = defaultdict(dict, all['chat_data'])\n- self.bot_data = all['bot_data']\n+ # For backwards compatibility with files not containing bot data\n+ self.bot_data = all.get('bot_data', {})\n self.conversations = all['conversations']\n except IOError:\n self.conversations = {}\n", "issue": "[BUG] v12.4 breaks PicklePersistence\n<!--\r\nThanks for reporting issues of python-telegram-bot!\r\n\r\nUse this template to notify us if you found a bug.\r\n\r\nTo make it easier for us to help you please enter detailed information below.\r\n\r\nPlease note, we only support the latest version of python-telegram-bot and\r\nmaster branch. Please make sure to upgrade & recreate the issue on the latest\r\nversion prior to opening an issue.\r\n-->\r\n### Steps to reproduce\r\n1. Have a bot using PicklePersistence with singlefile=True\r\n\r\n2. Upgrade to v12.4\r\n\r\n3. restart bot\r\n\r\n### Expected behaviour\r\npickled file is read correctly\r\n\r\n### Actual behaviour\r\nkey error `bot_data` is thrown\r\n\r\n### Current workaround:\r\nAdd an empty dict `bot_data` to the file manually. Quick and dirty script:\r\n```\r\nimport pickle\r\n\r\nfilename = 'my_pickle_persistence_file'\r\n\r\nwith (open(filename, 'rb')) as file:\r\n data = pickle.load(file)\r\n\r\ndata['bot_data'] = {}\r\n\r\nwith open(filename, 'wb') as f:\r\n pickle.dump(data, f)\r\n```\r\n\r\nWill be closed by #1760 \n", "before_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2020\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the PicklePersistence class.\"\"\"\nimport pickle\nfrom collections import defaultdict\nfrom copy import deepcopy\n\nfrom telegram.ext import BasePersistence\n\n\nclass PicklePersistence(BasePersistence):\n \"\"\"Using python's builtin pickle for making you bot persistent.\n\n Attributes:\n filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`\n is false this will be used as a prefix.\n store_user_data (:obj:`bool`): Optional. Whether user_data should be saved by this\n persistence class.\n store_chat_data (:obj:`bool`): Optional. Whether user_data should be saved by this\n persistence class.\n store_bot_data (:obj:`bool`): Optional. Whether bot_data should be saved by this\n persistence class.\n single_file (:obj:`bool`): Optional. When ``False`` will store 3 sperate files of\n `filename_user_data`, `filename_chat_data` and `filename_conversations`. 
Default is\n ``True``.\n on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`\n is called and keep data in memory until that happens. When ``False`` will store data\n on any transaction *and* on call fo :meth:`flush`. Default is ``False``.\n\n Args:\n filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`\n is false this will be used as a prefix.\n store_user_data (:obj:`bool`, optional): Whether user_data should be saved by this\n persistence class. Default is ``True``.\n store_chat_data (:obj:`bool`, optional): Whether user_data should be saved by this\n persistence class. Default is ``True``.\n store_bot_data (:obj:`bool`, optional): Whether bot_data should be saved by this\n persistence class. Default is ``True`` .\n single_file (:obj:`bool`, optional): When ``False`` will store 3 sperate files of\n `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is\n ``True``.\n on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`\n is called and keep data in memory until that happens. When ``False`` will store data\n on any transaction *and* on call fo :meth:`flush`. Default is ``False``.\n \"\"\"\n\n def __init__(self, filename,\n store_user_data=True,\n store_chat_data=True,\n store_bot_data=True,\n single_file=True,\n on_flush=False):\n super(PicklePersistence, self).__init__(store_user_data=store_user_data,\n store_chat_data=store_chat_data,\n store_bot_data=store_bot_data)\n self.filename = filename\n self.single_file = single_file\n self.on_flush = on_flush\n self.user_data = None\n self.chat_data = None\n self.bot_data = None\n self.conversations = None\n\n def load_singlefile(self):\n try:\n filename = self.filename\n with open(self.filename, \"rb\") as f:\n all = pickle.load(f)\n self.user_data = defaultdict(dict, all['user_data'])\n self.chat_data = defaultdict(dict, all['chat_data'])\n self.bot_data = all['bot_data']\n self.conversations = all['conversations']\n except IOError:\n self.conversations = {}\n self.user_data = defaultdict(dict)\n self.chat_data = defaultdict(dict)\n self.bot_data = {}\n except pickle.UnpicklingError:\n raise TypeError(\"File {} does not contain valid pickle data\".format(filename))\n except Exception:\n raise TypeError(\"Something went wrong unpickling {}\".format(filename))\n\n def load_file(self, filename):\n try:\n with open(filename, \"rb\") as f:\n return pickle.load(f)\n except IOError:\n return None\n except pickle.UnpicklingError:\n raise TypeError(\"File {} does not contain valid pickle data\".format(filename))\n except Exception:\n raise TypeError(\"Something went wrong unpickling {}\".format(filename))\n\n def dump_singlefile(self):\n with open(self.filename, \"wb\") as f:\n all = {'conversations': self.conversations, 'user_data': self.user_data,\n 'chat_data': self.chat_data, 'bot_data': self.bot_data}\n pickle.dump(all, f)\n\n def dump_file(self, filename, data):\n with open(filename, \"wb\") as f:\n pickle.dump(data, f)\n\n def get_user_data(self):\n \"\"\"Returns the user_data from the pickle file if it exsists or an empty defaultdict.\n\n Returns:\n :obj:`defaultdict`: The restored user data.\n \"\"\"\n if self.user_data:\n pass\n elif not self.single_file:\n filename = \"{}_user_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = defaultdict(dict)\n else:\n data = defaultdict(dict, data)\n self.user_data = data\n else:\n self.load_singlefile()\n return 
deepcopy(self.user_data)\n\n def get_chat_data(self):\n \"\"\"Returns the chat_data from the pickle file if it exsists or an empty defaultdict.\n\n Returns:\n :obj:`defaultdict`: The restored chat data.\n \"\"\"\n if self.chat_data:\n pass\n elif not self.single_file:\n filename = \"{}_chat_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = defaultdict(dict)\n else:\n data = defaultdict(dict, data)\n self.chat_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.chat_data)\n\n def get_bot_data(self):\n \"\"\"Returns the bot_data from the pickle file if it exsists or an empty dict.\n\n Returns:\n :obj:`defaultdict`: The restored bot data.\n \"\"\"\n if self.bot_data:\n pass\n elif not self.single_file:\n filename = \"{}_bot_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = {}\n self.bot_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.bot_data)\n\n def get_conversations(self, name):\n \"\"\"Returns the conversations from the pickle file if it exsists or an empty defaultdict.\n\n Args:\n name (:obj:`str`): The handlers name.\n\n Returns:\n :obj:`dict`: The restored conversations for the handler.\n \"\"\"\n if self.conversations:\n pass\n elif not self.single_file:\n filename = \"{}_conversations\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = {name: {}}\n self.conversations = data\n else:\n self.load_singlefile()\n return self.conversations.get(name, {}).copy()\n\n def update_conversation(self, name, key, new_state):\n \"\"\"Will update the conversations for the given handler and depending on :attr:`on_flush`\n save the pickle file.\n\n Args:\n name (:obj:`str`): The handlers name.\n key (:obj:`tuple`): The key the state is changed for.\n new_state (:obj:`tuple` | :obj:`any`): The new state for the given key.\n \"\"\"\n if self.conversations.setdefault(name, {}).get(key) == new_state:\n return\n self.conversations[name][key] = new_state\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_conversations\".format(self.filename)\n self.dump_file(filename, self.conversations)\n else:\n self.dump_singlefile()\n\n def update_user_data(self, user_id, data):\n \"\"\"Will update the user_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n user_id (:obj:`int`): The user the data might have been changed for.\n data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.user_data` [user_id].\n \"\"\"\n if self.user_data.get(user_id) == data:\n return\n self.user_data[user_id] = data\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_user_data\".format(self.filename)\n self.dump_file(filename, self.user_data)\n else:\n self.dump_singlefile()\n\n def update_chat_data(self, chat_id, data):\n \"\"\"Will update the chat_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n chat_id (:obj:`int`): The chat the data might have been changed for.\n data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.chat_data` [chat_id].\n \"\"\"\n if self.chat_data.get(chat_id) == data:\n return\n self.chat_data[chat_id] = data\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_chat_data\".format(self.filename)\n self.dump_file(filename, self.chat_data)\n else:\n self.dump_singlefile()\n\n def update_bot_data(self, data):\n \"\"\"Will update the bot_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n data (:obj:`dict`): The 
:attr:`telegram.ext.dispatcher.bot_data`.\n \"\"\"\n if self.bot_data == data:\n return\n self.bot_data = data.copy()\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_bot_data\".format(self.filename)\n self.dump_file(filename, self.bot_data)\n else:\n self.dump_singlefile()\n\n def flush(self):\n \"\"\" Will save all data in memory to pickle file(s).\n \"\"\"\n if self.single_file:\n if self.user_data or self.chat_data or self.conversations:\n self.dump_singlefile()\n else:\n if self.user_data:\n self.dump_file(\"{}_user_data\".format(self.filename), self.user_data)\n if self.chat_data:\n self.dump_file(\"{}_chat_data\".format(self.filename), self.chat_data)\n if self.bot_data:\n self.dump_file(\"{}_bot_data\".format(self.filename), self.bot_data)\n if self.conversations:\n self.dump_file(\"{}_conversations\".format(self.filename), self.conversations)\n", "path": "telegram/ext/picklepersistence.py"}], "after_files": [{"content": "#!/usr/bin/env python\n#\n# A library that provides a Python interface to the Telegram Bot API\n# Copyright (C) 2015-2020\n# Leandro Toledo de Souza <[email protected]>\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser Public License for more details.\n#\n# You should have received a copy of the GNU Lesser Public License\n# along with this program. If not, see [http://www.gnu.org/licenses/].\n\"\"\"This module contains the PicklePersistence class.\"\"\"\nimport pickle\nfrom collections import defaultdict\nfrom copy import deepcopy\n\nfrom telegram.ext import BasePersistence\n\n\nclass PicklePersistence(BasePersistence):\n \"\"\"Using python's builtin pickle for making you bot persistent.\n\n Attributes:\n filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`\n is false this will be used as a prefix.\n store_user_data (:obj:`bool`): Optional. Whether user_data should be saved by this\n persistence class.\n store_chat_data (:obj:`bool`): Optional. Whether user_data should be saved by this\n persistence class.\n store_bot_data (:obj:`bool`): Optional. Whether bot_data should be saved by this\n persistence class.\n single_file (:obj:`bool`): Optional. When ``False`` will store 3 sperate files of\n `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is\n ``True``.\n on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`\n is called and keep data in memory until that happens. When ``False`` will store data\n on any transaction *and* on call fo :meth:`flush`. Default is ``False``.\n\n Args:\n filename (:obj:`str`): The filename for storing the pickle files. When :attr:`single_file`\n is false this will be used as a prefix.\n store_user_data (:obj:`bool`, optional): Whether user_data should be saved by this\n persistence class. Default is ``True``.\n store_chat_data (:obj:`bool`, optional): Whether user_data should be saved by this\n persistence class. Default is ``True``.\n store_bot_data (:obj:`bool`, optional): Whether bot_data should be saved by this\n persistence class. 
Default is ``True`` .\n single_file (:obj:`bool`, optional): When ``False`` will store 3 sperate files of\n `filename_user_data`, `filename_chat_data` and `filename_conversations`. Default is\n ``True``.\n on_flush (:obj:`bool`, optional): When ``True`` will only save to file when :meth:`flush`\n is called and keep data in memory until that happens. When ``False`` will store data\n on any transaction *and* on call fo :meth:`flush`. Default is ``False``.\n \"\"\"\n\n def __init__(self, filename,\n store_user_data=True,\n store_chat_data=True,\n store_bot_data=True,\n single_file=True,\n on_flush=False):\n super(PicklePersistence, self).__init__(store_user_data=store_user_data,\n store_chat_data=store_chat_data,\n store_bot_data=store_bot_data)\n self.filename = filename\n self.single_file = single_file\n self.on_flush = on_flush\n self.user_data = None\n self.chat_data = None\n self.bot_data = None\n self.conversations = None\n\n def load_singlefile(self):\n try:\n filename = self.filename\n with open(self.filename, \"rb\") as f:\n all = pickle.load(f)\n self.user_data = defaultdict(dict, all['user_data'])\n self.chat_data = defaultdict(dict, all['chat_data'])\n # For backwards compatibility with files not containing bot data\n self.bot_data = all.get('bot_data', {})\n self.conversations = all['conversations']\n except IOError:\n self.conversations = {}\n self.user_data = defaultdict(dict)\n self.chat_data = defaultdict(dict)\n self.bot_data = {}\n except pickle.UnpicklingError:\n raise TypeError(\"File {} does not contain valid pickle data\".format(filename))\n except Exception:\n raise TypeError(\"Something went wrong unpickling {}\".format(filename))\n\n def load_file(self, filename):\n try:\n with open(filename, \"rb\") as f:\n return pickle.load(f)\n except IOError:\n return None\n except pickle.UnpicklingError:\n raise TypeError(\"File {} does not contain valid pickle data\".format(filename))\n except Exception:\n raise TypeError(\"Something went wrong unpickling {}\".format(filename))\n\n def dump_singlefile(self):\n with open(self.filename, \"wb\") as f:\n all = {'conversations': self.conversations, 'user_data': self.user_data,\n 'chat_data': self.chat_data, 'bot_data': self.bot_data}\n pickle.dump(all, f)\n\n def dump_file(self, filename, data):\n with open(filename, \"wb\") as f:\n pickle.dump(data, f)\n\n def get_user_data(self):\n \"\"\"Returns the user_data from the pickle file if it exsists or an empty defaultdict.\n\n Returns:\n :obj:`defaultdict`: The restored user data.\n \"\"\"\n if self.user_data:\n pass\n elif not self.single_file:\n filename = \"{}_user_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = defaultdict(dict)\n else:\n data = defaultdict(dict, data)\n self.user_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.user_data)\n\n def get_chat_data(self):\n \"\"\"Returns the chat_data from the pickle file if it exsists or an empty defaultdict.\n\n Returns:\n :obj:`defaultdict`: The restored chat data.\n \"\"\"\n if self.chat_data:\n pass\n elif not self.single_file:\n filename = \"{}_chat_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = defaultdict(dict)\n else:\n data = defaultdict(dict, data)\n self.chat_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.chat_data)\n\n def get_bot_data(self):\n \"\"\"Returns the bot_data from the pickle file if it exsists or an empty dict.\n\n Returns:\n :obj:`defaultdict`: The restored bot data.\n \"\"\"\n if 
self.bot_data:\n pass\n elif not self.single_file:\n filename = \"{}_bot_data\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = {}\n self.bot_data = data\n else:\n self.load_singlefile()\n return deepcopy(self.bot_data)\n\n def get_conversations(self, name):\n \"\"\"Returns the conversations from the pickle file if it exsists or an empty defaultdict.\n\n Args:\n name (:obj:`str`): The handlers name.\n\n Returns:\n :obj:`dict`: The restored conversations for the handler.\n \"\"\"\n if self.conversations:\n pass\n elif not self.single_file:\n filename = \"{}_conversations\".format(self.filename)\n data = self.load_file(filename)\n if not data:\n data = {name: {}}\n self.conversations = data\n else:\n self.load_singlefile()\n return self.conversations.get(name, {}).copy()\n\n def update_conversation(self, name, key, new_state):\n \"\"\"Will update the conversations for the given handler and depending on :attr:`on_flush`\n save the pickle file.\n\n Args:\n name (:obj:`str`): The handlers name.\n key (:obj:`tuple`): The key the state is changed for.\n new_state (:obj:`tuple` | :obj:`any`): The new state for the given key.\n \"\"\"\n if self.conversations.setdefault(name, {}).get(key) == new_state:\n return\n self.conversations[name][key] = new_state\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_conversations\".format(self.filename)\n self.dump_file(filename, self.conversations)\n else:\n self.dump_singlefile()\n\n def update_user_data(self, user_id, data):\n \"\"\"Will update the user_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n user_id (:obj:`int`): The user the data might have been changed for.\n data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.user_data` [user_id].\n \"\"\"\n if self.user_data.get(user_id) == data:\n return\n self.user_data[user_id] = data\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_user_data\".format(self.filename)\n self.dump_file(filename, self.user_data)\n else:\n self.dump_singlefile()\n\n def update_chat_data(self, chat_id, data):\n \"\"\"Will update the chat_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n chat_id (:obj:`int`): The chat the data might have been changed for.\n data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.chat_data` [chat_id].\n \"\"\"\n if self.chat_data.get(chat_id) == data:\n return\n self.chat_data[chat_id] = data\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_chat_data\".format(self.filename)\n self.dump_file(filename, self.chat_data)\n else:\n self.dump_singlefile()\n\n def update_bot_data(self, data):\n \"\"\"Will update the bot_data (if changed) and depending on :attr:`on_flush` save the\n pickle file.\n\n Args:\n data (:obj:`dict`): The :attr:`telegram.ext.dispatcher.bot_data`.\n \"\"\"\n if self.bot_data == data:\n return\n self.bot_data = data.copy()\n if not self.on_flush:\n if not self.single_file:\n filename = \"{}_bot_data\".format(self.filename)\n self.dump_file(filename, self.bot_data)\n else:\n self.dump_singlefile()\n\n def flush(self):\n \"\"\" Will save all data in memory to pickle file(s).\n \"\"\"\n if self.single_file:\n if self.user_data or self.chat_data or self.conversations:\n self.dump_singlefile()\n else:\n if self.user_data:\n self.dump_file(\"{}_user_data\".format(self.filename), self.user_data)\n if self.chat_data:\n self.dump_file(\"{}_chat_data\".format(self.filename), self.chat_data)\n if self.bot_data:\n 
self.dump_file(\"{}_bot_data\".format(self.filename), self.bot_data)\n if self.conversations:\n self.dump_file(\"{}_conversations\".format(self.filename), self.conversations)\n", "path": "telegram/ext/picklepersistence.py"}]} | 3,688 | 146 |
gh_patches_debug_33779 | rasdani/github-patches | git_diff | CTFd__CTFd-1911 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
IP to City Database
I think we can provide an IP to city database now instead of just showing country.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/utils/initialization/__init__.py`
Content:
```
1 import datetime
2 import logging
3 import os
4 import sys
5
6 from flask import abort, redirect, render_template, request, session, url_for
7 from sqlalchemy.exc import IntegrityError, InvalidRequestError
8 from werkzeug.middleware.dispatcher import DispatcherMiddleware
9
10 from CTFd.cache import clear_user_recent_ips
11 from CTFd.exceptions import UserNotFoundException, UserTokenExpiredException
12 from CTFd.models import Tracking, db
13 from CTFd.utils import config, get_config, markdown
14 from CTFd.utils.config import (
15 can_send_mail,
16 ctf_logo,
17 ctf_name,
18 ctf_theme,
19 integrations,
20 is_setup,
21 )
22 from CTFd.utils.config.pages import get_pages
23 from CTFd.utils.dates import isoformat, unix_time, unix_time_millis
24 from CTFd.utils.events import EventManager, RedisEventManager
25 from CTFd.utils.humanize.words import pluralize
26 from CTFd.utils.modes import generate_account_url, get_mode_as_word
27 from CTFd.utils.plugins import (
28 get_configurable_plugins,
29 get_registered_admin_scripts,
30 get_registered_admin_stylesheets,
31 get_registered_scripts,
32 get_registered_stylesheets,
33 )
34 from CTFd.utils.security.auth import login_user, logout_user, lookup_user_token
35 from CTFd.utils.security.csrf import generate_nonce
36 from CTFd.utils.user import (
37 authed,
38 get_current_team_attrs,
39 get_current_user_attrs,
40 get_current_user_recent_ips,
41 get_ip,
42 is_admin,
43 )
44
45
46 def init_template_filters(app):
47 app.jinja_env.filters["markdown"] = markdown
48 app.jinja_env.filters["unix_time"] = unix_time
49 app.jinja_env.filters["unix_time_millis"] = unix_time_millis
50 app.jinja_env.filters["isoformat"] = isoformat
51 app.jinja_env.filters["pluralize"] = pluralize
52
53
54 def init_template_globals(app):
55 from CTFd.constants import JINJA_ENUMS
56 from CTFd.constants.config import Configs
57 from CTFd.constants.plugins import Plugins
58 from CTFd.constants.sessions import Session
59 from CTFd.constants.static import Static
60 from CTFd.constants.users import User
61 from CTFd.constants.teams import Team
62 from CTFd.forms import Forms
63 from CTFd.utils.config.visibility import (
64 accounts_visible,
65 challenges_visible,
66 registration_visible,
67 scores_visible,
68 )
69 from CTFd.utils.countries import get_countries, lookup_country_code
70 from CTFd.utils.countries.geoip import lookup_ip_address
71
72 app.jinja_env.globals.update(config=config)
73 app.jinja_env.globals.update(get_pages=get_pages)
74 app.jinja_env.globals.update(can_send_mail=can_send_mail)
75 app.jinja_env.globals.update(get_ctf_name=ctf_name)
76 app.jinja_env.globals.update(get_ctf_logo=ctf_logo)
77 app.jinja_env.globals.update(get_ctf_theme=ctf_theme)
78 app.jinja_env.globals.update(get_configurable_plugins=get_configurable_plugins)
79 app.jinja_env.globals.update(get_registered_scripts=get_registered_scripts)
80 app.jinja_env.globals.update(get_registered_stylesheets=get_registered_stylesheets)
81 app.jinja_env.globals.update(
82 get_registered_admin_scripts=get_registered_admin_scripts
83 )
84 app.jinja_env.globals.update(
85 get_registered_admin_stylesheets=get_registered_admin_stylesheets
86 )
87 app.jinja_env.globals.update(get_config=get_config)
88 app.jinja_env.globals.update(generate_account_url=generate_account_url)
89 app.jinja_env.globals.update(get_countries=get_countries)
90 app.jinja_env.globals.update(lookup_country_code=lookup_country_code)
91 app.jinja_env.globals.update(lookup_ip_address=lookup_ip_address)
92 app.jinja_env.globals.update(accounts_visible=accounts_visible)
93 app.jinja_env.globals.update(challenges_visible=challenges_visible)
94 app.jinja_env.globals.update(registration_visible=registration_visible)
95 app.jinja_env.globals.update(scores_visible=scores_visible)
96 app.jinja_env.globals.update(get_mode_as_word=get_mode_as_word)
97 app.jinja_env.globals.update(integrations=integrations)
98 app.jinja_env.globals.update(authed=authed)
99 app.jinja_env.globals.update(is_admin=is_admin)
100 app.jinja_env.globals.update(get_current_user_attrs=get_current_user_attrs)
101 app.jinja_env.globals.update(get_current_team_attrs=get_current_team_attrs)
102 app.jinja_env.globals.update(get_ip=get_ip)
103 app.jinja_env.globals.update(Configs=Configs)
104 app.jinja_env.globals.update(Plugins=Plugins)
105 app.jinja_env.globals.update(Session=Session)
106 app.jinja_env.globals.update(Static=Static)
107 app.jinja_env.globals.update(Forms=Forms)
108 app.jinja_env.globals.update(User=User)
109 app.jinja_env.globals.update(Team=Team)
110
111 # Add in JinjaEnums
112 # The reason this exists is that on double import, JinjaEnums are not reinitialized
113 # Thus, if you try to create two jinja envs (e.g. during testing), sometimes
114 # an Enum will not be available to Jinja.
115 # Instead we can just directly grab them from the persisted global dictionary.
116 for k, v in JINJA_ENUMS.items():
117 # .update() can't be used here because it would use the literal value k
118 app.jinja_env.globals[k] = v
119
120
121 def init_logs(app):
122 logger_submissions = logging.getLogger("submissions")
123 logger_logins = logging.getLogger("logins")
124 logger_registrations = logging.getLogger("registrations")
125
126 logger_submissions.setLevel(logging.INFO)
127 logger_logins.setLevel(logging.INFO)
128 logger_registrations.setLevel(logging.INFO)
129
130 log_dir = app.config["LOG_FOLDER"]
131 if not os.path.exists(log_dir):
132 os.makedirs(log_dir)
133
134 logs = {
135 "submissions": os.path.join(log_dir, "submissions.log"),
136 "logins": os.path.join(log_dir, "logins.log"),
137 "registrations": os.path.join(log_dir, "registrations.log"),
138 }
139
140 try:
141 for log in logs.values():
142 if not os.path.exists(log):
143 open(log, "a").close()
144
145 submission_log = logging.handlers.RotatingFileHandler(
146 logs["submissions"], maxBytes=10485760, backupCount=5
147 )
148 login_log = logging.handlers.RotatingFileHandler(
149 logs["logins"], maxBytes=10485760, backupCount=5
150 )
151 registration_log = logging.handlers.RotatingFileHandler(
152 logs["registrations"], maxBytes=10485760, backupCount=5
153 )
154
155 logger_submissions.addHandler(submission_log)
156 logger_logins.addHandler(login_log)
157 logger_registrations.addHandler(registration_log)
158 except IOError:
159 pass
160
161 stdout = logging.StreamHandler(stream=sys.stdout)
162
163 logger_submissions.addHandler(stdout)
164 logger_logins.addHandler(stdout)
165 logger_registrations.addHandler(stdout)
166
167 logger_submissions.propagate = 0
168 logger_logins.propagate = 0
169 logger_registrations.propagate = 0
170
171
172 def init_events(app):
173 if app.config.get("CACHE_TYPE") == "redis":
174 app.events_manager = RedisEventManager()
175 elif app.config.get("CACHE_TYPE") == "filesystem":
176 app.events_manager = EventManager()
177 else:
178 app.events_manager = EventManager()
179 app.events_manager.listen()
180
181
182 def init_request_processors(app):
183 @app.url_defaults
184 def inject_theme(endpoint, values):
185 if "theme" not in values and app.url_map.is_endpoint_expecting(
186 endpoint, "theme"
187 ):
188 values["theme"] = ctf_theme()
189
190 @app.before_request
191 def needs_setup():
192 if is_setup() is False:
193 if request.endpoint in (
194 "views.setup",
195 "views.integrations",
196 "views.themes",
197 "views.files",
198 ):
199 return
200 else:
201 return redirect(url_for("views.setup"))
202
203 @app.before_request
204 def tracker():
205 if request.endpoint == "views.themes":
206 return
207
208 if authed():
209 user_ips = get_current_user_recent_ips()
210 ip = get_ip()
211
212 track = None
213 if (ip not in user_ips) or (request.method != "GET"):
214 track = Tracking.query.filter_by(
215 ip=get_ip(), user_id=session["id"]
216 ).first()
217
218 if track:
219 track.date = datetime.datetime.utcnow()
220 else:
221 track = Tracking(ip=get_ip(), user_id=session["id"])
222 db.session.add(track)
223
224 if track:
225 try:
226 db.session.commit()
227 except (InvalidRequestError, IntegrityError):
228 db.session.rollback()
229 db.session.close()
230 logout_user()
231 else:
232 clear_user_recent_ips(user_id=session["id"])
233
234 @app.before_request
235 def banned():
236 if request.endpoint == "views.themes":
237 return
238
239 if authed():
240 user = get_current_user_attrs()
241 team = get_current_team_attrs()
242
243 if user and user.banned:
244 return (
245 render_template(
246 "errors/403.html", error="You have been banned from this CTF"
247 ),
248 403,
249 )
250
251 if team and team.banned:
252 return (
253 render_template(
254 "errors/403.html",
255 error="Your team has been banned from this CTF",
256 ),
257 403,
258 )
259
260 @app.before_request
261 def tokens():
262 token = request.headers.get("Authorization")
263 if token and request.content_type == "application/json":
264 try:
265 token_type, token = token.split(" ", 1)
266 user = lookup_user_token(token)
267 except UserNotFoundException:
268 abort(401)
269 except UserTokenExpiredException:
270 abort(401, description="Your access token has expired")
271 except Exception:
272 abort(401)
273 else:
274 login_user(user)
275
276 @app.before_request
277 def csrf():
278 try:
279 func = app.view_functions[request.endpoint]
280 except KeyError:
281 abort(404)
282 if hasattr(func, "_bypass_csrf"):
283 return
284 if request.headers.get("Authorization"):
285 return
286 if not session.get("nonce"):
287 session["nonce"] = generate_nonce()
288 if request.method not in ("GET", "HEAD", "OPTIONS", "TRACE"):
289 if request.content_type == "application/json":
290 if session["nonce"] != request.headers.get("CSRF-Token"):
291 abort(403)
292 if request.content_type != "application/json":
293 if session["nonce"] != request.form.get("nonce"):
294 abort(403)
295
296 application_root = app.config.get("APPLICATION_ROOT")
297 if application_root != "/":
298
299 @app.before_request
300 def force_subdirectory_redirect():
301 if request.path.startswith(application_root) is False:
302 return redirect(
303 application_root + request.script_root + request.full_path
304 )
305
306 app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {application_root: app})
307
```
Path: `CTFd/utils/countries/geoip.py`
Content:
```
1 import geoacumen
2 import maxminddb
3 from flask import current_app
4
5 IP_ADDR_LOOKUP = maxminddb.open_database(
6 current_app.config.get("GEOIP_DATABASE_PATH", geoacumen.db_path)
7 )
8
9
10 def lookup_ip_address(addr):
11 try:
12 response = IP_ADDR_LOOKUP.get(addr)
13 return response["country"]["iso_code"]
14 except (KeyError, ValueError):
15 return None
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/utils/countries/geoip.py b/CTFd/utils/countries/geoip.py
--- a/CTFd/utils/countries/geoip.py
+++ b/CTFd/utils/countries/geoip.py
@@ -1,9 +1,9 @@
-import geoacumen
+import geoacumen_city
import maxminddb
from flask import current_app
IP_ADDR_LOOKUP = maxminddb.open_database(
- current_app.config.get("GEOIP_DATABASE_PATH", geoacumen.db_path)
+ current_app.config.get("GEOIP_DATABASE_PATH", geoacumen_city.db_path)
)
@@ -11,5 +11,13 @@
try:
response = IP_ADDR_LOOKUP.get(addr)
return response["country"]["iso_code"]
- except (KeyError, ValueError):
+ except (KeyError, ValueError, TypeError):
+ return None
+
+
+def lookup_ip_address_city(addr):
+ try:
+ response = IP_ADDR_LOOKUP.get(addr)
+ return response["city"]["names"]["en"]
+ except (KeyError, ValueError, TypeError):
return None
diff --git a/CTFd/utils/initialization/__init__.py b/CTFd/utils/initialization/__init__.py
--- a/CTFd/utils/initialization/__init__.py
+++ b/CTFd/utils/initialization/__init__.py
@@ -67,7 +67,7 @@
scores_visible,
)
from CTFd.utils.countries import get_countries, lookup_country_code
- from CTFd.utils.countries.geoip import lookup_ip_address
+ from CTFd.utils.countries.geoip import lookup_ip_address, lookup_ip_address_city
app.jinja_env.globals.update(config=config)
app.jinja_env.globals.update(get_pages=get_pages)
@@ -89,6 +89,7 @@
app.jinja_env.globals.update(get_countries=get_countries)
app.jinja_env.globals.update(lookup_country_code=lookup_country_code)
app.jinja_env.globals.update(lookup_ip_address=lookup_ip_address)
+ app.jinja_env.globals.update(lookup_ip_address_city=lookup_ip_address_city)
app.jinja_env.globals.update(accounts_visible=accounts_visible)
app.jinja_env.globals.update(challenges_visible=challenges_visible)
app.jinja_env.globals.update(registration_visible=registration_visible)
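One detail in the diff above that is easy to miss: `maxminddb`'s `Reader.get()` returns `None` for addresses with no record (private ranges, localhost), and subscripting `None` raises `TypeError` rather than `KeyError` — which is why `TypeError` joins the caught exceptions. A tiny standalone illustration, with a made-up value standing in for the reader's response:
```python
# Why the patch catches TypeError as well: an unresolvable address yields
# None from Reader.get(), and None["city"] is a TypeError, not a KeyError.
record = None  # what Reader.get("127.0.0.1") typically returns

try:
    city = record["city"]["names"]["en"]
except (KeyError, ValueError, TypeError):
    city = None

print(city)  # -> None instead of an unhandled crash
```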
| {"golden_diff": "diff --git a/CTFd/utils/countries/geoip.py b/CTFd/utils/countries/geoip.py\n--- a/CTFd/utils/countries/geoip.py\n+++ b/CTFd/utils/countries/geoip.py\n@@ -1,9 +1,9 @@\n-import geoacumen\n+import geoacumen_city\n import maxminddb\n from flask import current_app\n \n IP_ADDR_LOOKUP = maxminddb.open_database(\n- current_app.config.get(\"GEOIP_DATABASE_PATH\", geoacumen.db_path)\n+ current_app.config.get(\"GEOIP_DATABASE_PATH\", geoacumen_city.db_path)\n )\n \n \n@@ -11,5 +11,13 @@\n try:\n response = IP_ADDR_LOOKUP.get(addr)\n return response[\"country\"][\"iso_code\"]\n- except (KeyError, ValueError):\n+ except (KeyError, ValueError, TypeError):\n+ return None\n+\n+\n+def lookup_ip_address_city(addr):\n+ try:\n+ response = IP_ADDR_LOOKUP.get(addr)\n+ return response[\"city\"][\"names\"][\"en\"]\n+ except (KeyError, ValueError, TypeError):\n return None\ndiff --git a/CTFd/utils/initialization/__init__.py b/CTFd/utils/initialization/__init__.py\n--- a/CTFd/utils/initialization/__init__.py\n+++ b/CTFd/utils/initialization/__init__.py\n@@ -67,7 +67,7 @@\n scores_visible,\n )\n from CTFd.utils.countries import get_countries, lookup_country_code\n- from CTFd.utils.countries.geoip import lookup_ip_address\n+ from CTFd.utils.countries.geoip import lookup_ip_address, lookup_ip_address_city\n \n app.jinja_env.globals.update(config=config)\n app.jinja_env.globals.update(get_pages=get_pages)\n@@ -89,6 +89,7 @@\n app.jinja_env.globals.update(get_countries=get_countries)\n app.jinja_env.globals.update(lookup_country_code=lookup_country_code)\n app.jinja_env.globals.update(lookup_ip_address=lookup_ip_address)\n+ app.jinja_env.globals.update(lookup_ip_address_city=lookup_ip_address_city)\n app.jinja_env.globals.update(accounts_visible=accounts_visible)\n app.jinja_env.globals.update(challenges_visible=challenges_visible)\n app.jinja_env.globals.update(registration_visible=registration_visible)\n", "issue": "IP to City Database\nI think we can provide an IP to city database now instead of just showing country. 
\n", "before_files": [{"content": "import datetime\nimport logging\nimport os\nimport sys\n\nfrom flask import abort, redirect, render_template, request, session, url_for\nfrom sqlalchemy.exc import IntegrityError, InvalidRequestError\nfrom werkzeug.middleware.dispatcher import DispatcherMiddleware\n\nfrom CTFd.cache import clear_user_recent_ips\nfrom CTFd.exceptions import UserNotFoundException, UserTokenExpiredException\nfrom CTFd.models import Tracking, db\nfrom CTFd.utils import config, get_config, markdown\nfrom CTFd.utils.config import (\n can_send_mail,\n ctf_logo,\n ctf_name,\n ctf_theme,\n integrations,\n is_setup,\n)\nfrom CTFd.utils.config.pages import get_pages\nfrom CTFd.utils.dates import isoformat, unix_time, unix_time_millis\nfrom CTFd.utils.events import EventManager, RedisEventManager\nfrom CTFd.utils.humanize.words import pluralize\nfrom CTFd.utils.modes import generate_account_url, get_mode_as_word\nfrom CTFd.utils.plugins import (\n get_configurable_plugins,\n get_registered_admin_scripts,\n get_registered_admin_stylesheets,\n get_registered_scripts,\n get_registered_stylesheets,\n)\nfrom CTFd.utils.security.auth import login_user, logout_user, lookup_user_token\nfrom CTFd.utils.security.csrf import generate_nonce\nfrom CTFd.utils.user import (\n authed,\n get_current_team_attrs,\n get_current_user_attrs,\n get_current_user_recent_ips,\n get_ip,\n is_admin,\n)\n\n\ndef init_template_filters(app):\n app.jinja_env.filters[\"markdown\"] = markdown\n app.jinja_env.filters[\"unix_time\"] = unix_time\n app.jinja_env.filters[\"unix_time_millis\"] = unix_time_millis\n app.jinja_env.filters[\"isoformat\"] = isoformat\n app.jinja_env.filters[\"pluralize\"] = pluralize\n\n\ndef init_template_globals(app):\n from CTFd.constants import JINJA_ENUMS\n from CTFd.constants.config import Configs\n from CTFd.constants.plugins import Plugins\n from CTFd.constants.sessions import Session\n from CTFd.constants.static import Static\n from CTFd.constants.users import User\n from CTFd.constants.teams import Team\n from CTFd.forms import Forms\n from CTFd.utils.config.visibility import (\n accounts_visible,\n challenges_visible,\n registration_visible,\n scores_visible,\n )\n from CTFd.utils.countries import get_countries, lookup_country_code\n from CTFd.utils.countries.geoip import lookup_ip_address\n\n app.jinja_env.globals.update(config=config)\n app.jinja_env.globals.update(get_pages=get_pages)\n app.jinja_env.globals.update(can_send_mail=can_send_mail)\n app.jinja_env.globals.update(get_ctf_name=ctf_name)\n app.jinja_env.globals.update(get_ctf_logo=ctf_logo)\n app.jinja_env.globals.update(get_ctf_theme=ctf_theme)\n app.jinja_env.globals.update(get_configurable_plugins=get_configurable_plugins)\n app.jinja_env.globals.update(get_registered_scripts=get_registered_scripts)\n app.jinja_env.globals.update(get_registered_stylesheets=get_registered_stylesheets)\n app.jinja_env.globals.update(\n get_registered_admin_scripts=get_registered_admin_scripts\n )\n app.jinja_env.globals.update(\n get_registered_admin_stylesheets=get_registered_admin_stylesheets\n )\n app.jinja_env.globals.update(get_config=get_config)\n app.jinja_env.globals.update(generate_account_url=generate_account_url)\n app.jinja_env.globals.update(get_countries=get_countries)\n app.jinja_env.globals.update(lookup_country_code=lookup_country_code)\n app.jinja_env.globals.update(lookup_ip_address=lookup_ip_address)\n app.jinja_env.globals.update(accounts_visible=accounts_visible)\n 
app.jinja_env.globals.update(challenges_visible=challenges_visible)\n app.jinja_env.globals.update(registration_visible=registration_visible)\n app.jinja_env.globals.update(scores_visible=scores_visible)\n app.jinja_env.globals.update(get_mode_as_word=get_mode_as_word)\n app.jinja_env.globals.update(integrations=integrations)\n app.jinja_env.globals.update(authed=authed)\n app.jinja_env.globals.update(is_admin=is_admin)\n app.jinja_env.globals.update(get_current_user_attrs=get_current_user_attrs)\n app.jinja_env.globals.update(get_current_team_attrs=get_current_team_attrs)\n app.jinja_env.globals.update(get_ip=get_ip)\n app.jinja_env.globals.update(Configs=Configs)\n app.jinja_env.globals.update(Plugins=Plugins)\n app.jinja_env.globals.update(Session=Session)\n app.jinja_env.globals.update(Static=Static)\n app.jinja_env.globals.update(Forms=Forms)\n app.jinja_env.globals.update(User=User)\n app.jinja_env.globals.update(Team=Team)\n\n # Add in JinjaEnums\n # The reason this exists is that on double import, JinjaEnums are not reinitialized\n # Thus, if you try to create two jinja envs (e.g. during testing), sometimes\n # an Enum will not be available to Jinja.\n # Instead we can just directly grab them from the persisted global dictionary.\n for k, v in JINJA_ENUMS.items():\n # .update() can't be used here because it would use the literal value k\n app.jinja_env.globals[k] = v\n\n\ndef init_logs(app):\n logger_submissions = logging.getLogger(\"submissions\")\n logger_logins = logging.getLogger(\"logins\")\n logger_registrations = logging.getLogger(\"registrations\")\n\n logger_submissions.setLevel(logging.INFO)\n logger_logins.setLevel(logging.INFO)\n logger_registrations.setLevel(logging.INFO)\n\n log_dir = app.config[\"LOG_FOLDER\"]\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n logs = {\n \"submissions\": os.path.join(log_dir, \"submissions.log\"),\n \"logins\": os.path.join(log_dir, \"logins.log\"),\n \"registrations\": os.path.join(log_dir, \"registrations.log\"),\n }\n\n try:\n for log in logs.values():\n if not os.path.exists(log):\n open(log, \"a\").close()\n\n submission_log = logging.handlers.RotatingFileHandler(\n logs[\"submissions\"], maxBytes=10485760, backupCount=5\n )\n login_log = logging.handlers.RotatingFileHandler(\n logs[\"logins\"], maxBytes=10485760, backupCount=5\n )\n registration_log = logging.handlers.RotatingFileHandler(\n logs[\"registrations\"], maxBytes=10485760, backupCount=5\n )\n\n logger_submissions.addHandler(submission_log)\n logger_logins.addHandler(login_log)\n logger_registrations.addHandler(registration_log)\n except IOError:\n pass\n\n stdout = logging.StreamHandler(stream=sys.stdout)\n\n logger_submissions.addHandler(stdout)\n logger_logins.addHandler(stdout)\n logger_registrations.addHandler(stdout)\n\n logger_submissions.propagate = 0\n logger_logins.propagate = 0\n logger_registrations.propagate = 0\n\n\ndef init_events(app):\n if app.config.get(\"CACHE_TYPE\") == \"redis\":\n app.events_manager = RedisEventManager()\n elif app.config.get(\"CACHE_TYPE\") == \"filesystem\":\n app.events_manager = EventManager()\n else:\n app.events_manager = EventManager()\n app.events_manager.listen()\n\n\ndef init_request_processors(app):\n @app.url_defaults\n def inject_theme(endpoint, values):\n if \"theme\" not in values and app.url_map.is_endpoint_expecting(\n endpoint, \"theme\"\n ):\n values[\"theme\"] = ctf_theme()\n\n @app.before_request\n def needs_setup():\n if is_setup() is False:\n if request.endpoint in (\n \"views.setup\",\n 
\"views.integrations\",\n \"views.themes\",\n \"views.files\",\n ):\n return\n else:\n return redirect(url_for(\"views.setup\"))\n\n @app.before_request\n def tracker():\n if request.endpoint == \"views.themes\":\n return\n\n if authed():\n user_ips = get_current_user_recent_ips()\n ip = get_ip()\n\n track = None\n if (ip not in user_ips) or (request.method != \"GET\"):\n track = Tracking.query.filter_by(\n ip=get_ip(), user_id=session[\"id\"]\n ).first()\n\n if track:\n track.date = datetime.datetime.utcnow()\n else:\n track = Tracking(ip=get_ip(), user_id=session[\"id\"])\n db.session.add(track)\n\n if track:\n try:\n db.session.commit()\n except (InvalidRequestError, IntegrityError):\n db.session.rollback()\n db.session.close()\n logout_user()\n else:\n clear_user_recent_ips(user_id=session[\"id\"])\n\n @app.before_request\n def banned():\n if request.endpoint == \"views.themes\":\n return\n\n if authed():\n user = get_current_user_attrs()\n team = get_current_team_attrs()\n\n if user and user.banned:\n return (\n render_template(\n \"errors/403.html\", error=\"You have been banned from this CTF\"\n ),\n 403,\n )\n\n if team and team.banned:\n return (\n render_template(\n \"errors/403.html\",\n error=\"Your team has been banned from this CTF\",\n ),\n 403,\n )\n\n @app.before_request\n def tokens():\n token = request.headers.get(\"Authorization\")\n if token and request.content_type == \"application/json\":\n try:\n token_type, token = token.split(\" \", 1)\n user = lookup_user_token(token)\n except UserNotFoundException:\n abort(401)\n except UserTokenExpiredException:\n abort(401, description=\"Your access token has expired\")\n except Exception:\n abort(401)\n else:\n login_user(user)\n\n @app.before_request\n def csrf():\n try:\n func = app.view_functions[request.endpoint]\n except KeyError:\n abort(404)\n if hasattr(func, \"_bypass_csrf\"):\n return\n if request.headers.get(\"Authorization\"):\n return\n if not session.get(\"nonce\"):\n session[\"nonce\"] = generate_nonce()\n if request.method not in (\"GET\", \"HEAD\", \"OPTIONS\", \"TRACE\"):\n if request.content_type == \"application/json\":\n if session[\"nonce\"] != request.headers.get(\"CSRF-Token\"):\n abort(403)\n if request.content_type != \"application/json\":\n if session[\"nonce\"] != request.form.get(\"nonce\"):\n abort(403)\n\n application_root = app.config.get(\"APPLICATION_ROOT\")\n if application_root != \"/\":\n\n @app.before_request\n def force_subdirectory_redirect():\n if request.path.startswith(application_root) is False:\n return redirect(\n application_root + request.script_root + request.full_path\n )\n\n app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {application_root: app})\n", "path": "CTFd/utils/initialization/__init__.py"}, {"content": "import geoacumen\nimport maxminddb\nfrom flask import current_app\n\nIP_ADDR_LOOKUP = maxminddb.open_database(\n current_app.config.get(\"GEOIP_DATABASE_PATH\", geoacumen.db_path)\n)\n\n\ndef lookup_ip_address(addr):\n try:\n response = IP_ADDR_LOOKUP.get(addr)\n return response[\"country\"][\"iso_code\"]\n except (KeyError, ValueError):\n return None\n", "path": "CTFd/utils/countries/geoip.py"}], "after_files": [{"content": "import datetime\nimport logging\nimport os\nimport sys\n\nfrom flask import abort, redirect, render_template, request, session, url_for\nfrom sqlalchemy.exc import IntegrityError, InvalidRequestError\nfrom werkzeug.middleware.dispatcher import DispatcherMiddleware\n\nfrom CTFd.cache import clear_user_recent_ips\nfrom CTFd.exceptions import 
UserNotFoundException, UserTokenExpiredException\nfrom CTFd.models import Tracking, db\nfrom CTFd.utils import config, get_config, markdown\nfrom CTFd.utils.config import (\n can_send_mail,\n ctf_logo,\n ctf_name,\n ctf_theme,\n integrations,\n is_setup,\n)\nfrom CTFd.utils.config.pages import get_pages\nfrom CTFd.utils.dates import isoformat, unix_time, unix_time_millis\nfrom CTFd.utils.events import EventManager, RedisEventManager\nfrom CTFd.utils.humanize.words import pluralize\nfrom CTFd.utils.modes import generate_account_url, get_mode_as_word\nfrom CTFd.utils.plugins import (\n get_configurable_plugins,\n get_registered_admin_scripts,\n get_registered_admin_stylesheets,\n get_registered_scripts,\n get_registered_stylesheets,\n)\nfrom CTFd.utils.security.auth import login_user, logout_user, lookup_user_token\nfrom CTFd.utils.security.csrf import generate_nonce\nfrom CTFd.utils.user import (\n authed,\n get_current_team_attrs,\n get_current_user_attrs,\n get_current_user_recent_ips,\n get_ip,\n is_admin,\n)\n\n\ndef init_template_filters(app):\n app.jinja_env.filters[\"markdown\"] = markdown\n app.jinja_env.filters[\"unix_time\"] = unix_time\n app.jinja_env.filters[\"unix_time_millis\"] = unix_time_millis\n app.jinja_env.filters[\"isoformat\"] = isoformat\n app.jinja_env.filters[\"pluralize\"] = pluralize\n\n\ndef init_template_globals(app):\n from CTFd.constants import JINJA_ENUMS\n from CTFd.constants.config import Configs\n from CTFd.constants.plugins import Plugins\n from CTFd.constants.sessions import Session\n from CTFd.constants.static import Static\n from CTFd.constants.users import User\n from CTFd.constants.teams import Team\n from CTFd.forms import Forms\n from CTFd.utils.config.visibility import (\n accounts_visible,\n challenges_visible,\n registration_visible,\n scores_visible,\n )\n from CTFd.utils.countries import get_countries, lookup_country_code\n from CTFd.utils.countries.geoip import lookup_ip_address, lookup_ip_address_city\n\n app.jinja_env.globals.update(config=config)\n app.jinja_env.globals.update(get_pages=get_pages)\n app.jinja_env.globals.update(can_send_mail=can_send_mail)\n app.jinja_env.globals.update(get_ctf_name=ctf_name)\n app.jinja_env.globals.update(get_ctf_logo=ctf_logo)\n app.jinja_env.globals.update(get_ctf_theme=ctf_theme)\n app.jinja_env.globals.update(get_configurable_plugins=get_configurable_plugins)\n app.jinja_env.globals.update(get_registered_scripts=get_registered_scripts)\n app.jinja_env.globals.update(get_registered_stylesheets=get_registered_stylesheets)\n app.jinja_env.globals.update(\n get_registered_admin_scripts=get_registered_admin_scripts\n )\n app.jinja_env.globals.update(\n get_registered_admin_stylesheets=get_registered_admin_stylesheets\n )\n app.jinja_env.globals.update(get_config=get_config)\n app.jinja_env.globals.update(generate_account_url=generate_account_url)\n app.jinja_env.globals.update(get_countries=get_countries)\n app.jinja_env.globals.update(lookup_country_code=lookup_country_code)\n app.jinja_env.globals.update(lookup_ip_address=lookup_ip_address)\n app.jinja_env.globals.update(lookup_ip_address_city=lookup_ip_address_city)\n app.jinja_env.globals.update(accounts_visible=accounts_visible)\n app.jinja_env.globals.update(challenges_visible=challenges_visible)\n app.jinja_env.globals.update(registration_visible=registration_visible)\n app.jinja_env.globals.update(scores_visible=scores_visible)\n app.jinja_env.globals.update(get_mode_as_word=get_mode_as_word)\n 
app.jinja_env.globals.update(integrations=integrations)\n app.jinja_env.globals.update(authed=authed)\n app.jinja_env.globals.update(is_admin=is_admin)\n app.jinja_env.globals.update(get_current_user_attrs=get_current_user_attrs)\n app.jinja_env.globals.update(get_current_team_attrs=get_current_team_attrs)\n app.jinja_env.globals.update(get_ip=get_ip)\n app.jinja_env.globals.update(Configs=Configs)\n app.jinja_env.globals.update(Plugins=Plugins)\n app.jinja_env.globals.update(Session=Session)\n app.jinja_env.globals.update(Static=Static)\n app.jinja_env.globals.update(Forms=Forms)\n app.jinja_env.globals.update(User=User)\n app.jinja_env.globals.update(Team=Team)\n\n # Add in JinjaEnums\n # The reason this exists is that on double import, JinjaEnums are not reinitialized\n # Thus, if you try to create two jinja envs (e.g. during testing), sometimes\n # an Enum will not be available to Jinja.\n # Instead we can just directly grab them from the persisted global dictionary.\n for k, v in JINJA_ENUMS.items():\n # .update() can't be used here because it would use the literal value k\n app.jinja_env.globals[k] = v\n\n\ndef init_logs(app):\n logger_submissions = logging.getLogger(\"submissions\")\n logger_logins = logging.getLogger(\"logins\")\n logger_registrations = logging.getLogger(\"registrations\")\n\n logger_submissions.setLevel(logging.INFO)\n logger_logins.setLevel(logging.INFO)\n logger_registrations.setLevel(logging.INFO)\n\n log_dir = app.config[\"LOG_FOLDER\"]\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n logs = {\n \"submissions\": os.path.join(log_dir, \"submissions.log\"),\n \"logins\": os.path.join(log_dir, \"logins.log\"),\n \"registrations\": os.path.join(log_dir, \"registrations.log\"),\n }\n\n try:\n for log in logs.values():\n if not os.path.exists(log):\n open(log, \"a\").close()\n\n submission_log = logging.handlers.RotatingFileHandler(\n logs[\"submissions\"], maxBytes=10485760, backupCount=5\n )\n login_log = logging.handlers.RotatingFileHandler(\n logs[\"logins\"], maxBytes=10485760, backupCount=5\n )\n registration_log = logging.handlers.RotatingFileHandler(\n logs[\"registrations\"], maxBytes=10485760, backupCount=5\n )\n\n logger_submissions.addHandler(submission_log)\n logger_logins.addHandler(login_log)\n logger_registrations.addHandler(registration_log)\n except IOError:\n pass\n\n stdout = logging.StreamHandler(stream=sys.stdout)\n\n logger_submissions.addHandler(stdout)\n logger_logins.addHandler(stdout)\n logger_registrations.addHandler(stdout)\n\n logger_submissions.propagate = 0\n logger_logins.propagate = 0\n logger_registrations.propagate = 0\n\n\ndef init_events(app):\n if app.config.get(\"CACHE_TYPE\") == \"redis\":\n app.events_manager = RedisEventManager()\n elif app.config.get(\"CACHE_TYPE\") == \"filesystem\":\n app.events_manager = EventManager()\n else:\n app.events_manager = EventManager()\n app.events_manager.listen()\n\n\ndef init_request_processors(app):\n @app.url_defaults\n def inject_theme(endpoint, values):\n if \"theme\" not in values and app.url_map.is_endpoint_expecting(\n endpoint, \"theme\"\n ):\n values[\"theme\"] = ctf_theme()\n\n @app.before_request\n def needs_setup():\n if is_setup() is False:\n if request.endpoint in (\n \"views.setup\",\n \"views.integrations\",\n \"views.themes\",\n \"views.files\",\n ):\n return\n else:\n return redirect(url_for(\"views.setup\"))\n\n @app.before_request\n def tracker():\n if request.endpoint == \"views.themes\":\n return\n\n if authed():\n user_ips = 
get_current_user_recent_ips()\n ip = get_ip()\n\n track = None\n if (ip not in user_ips) or (request.method != \"GET\"):\n track = Tracking.query.filter_by(\n ip=get_ip(), user_id=session[\"id\"]\n ).first()\n\n if track:\n track.date = datetime.datetime.utcnow()\n else:\n track = Tracking(ip=get_ip(), user_id=session[\"id\"])\n db.session.add(track)\n\n if track:\n try:\n db.session.commit()\n except (InvalidRequestError, IntegrityError):\n db.session.rollback()\n db.session.close()\n logout_user()\n else:\n clear_user_recent_ips(user_id=session[\"id\"])\n\n @app.before_request\n def banned():\n if request.endpoint == \"views.themes\":\n return\n\n if authed():\n user = get_current_user_attrs()\n team = get_current_team_attrs()\n\n if user and user.banned:\n return (\n render_template(\n \"errors/403.html\", error=\"You have been banned from this CTF\"\n ),\n 403,\n )\n\n if team and team.banned:\n return (\n render_template(\n \"errors/403.html\",\n error=\"Your team has been banned from this CTF\",\n ),\n 403,\n )\n\n @app.before_request\n def tokens():\n token = request.headers.get(\"Authorization\")\n if token and request.content_type == \"application/json\":\n try:\n token_type, token = token.split(\" \", 1)\n user = lookup_user_token(token)\n except UserNotFoundException:\n abort(401)\n except UserTokenExpiredException:\n abort(401, description=\"Your access token has expired\")\n except Exception:\n abort(401)\n else:\n login_user(user)\n\n @app.before_request\n def csrf():\n try:\n func = app.view_functions[request.endpoint]\n except KeyError:\n abort(404)\n if hasattr(func, \"_bypass_csrf\"):\n return\n if request.headers.get(\"Authorization\"):\n return\n if not session.get(\"nonce\"):\n session[\"nonce\"] = generate_nonce()\n if request.method not in (\"GET\", \"HEAD\", \"OPTIONS\", \"TRACE\"):\n if request.content_type == \"application/json\":\n if session[\"nonce\"] != request.headers.get(\"CSRF-Token\"):\n abort(403)\n if request.content_type != \"application/json\":\n if session[\"nonce\"] != request.form.get(\"nonce\"):\n abort(403)\n\n application_root = app.config.get(\"APPLICATION_ROOT\")\n if application_root != \"/\":\n\n @app.before_request\n def force_subdirectory_redirect():\n if request.path.startswith(application_root) is False:\n return redirect(\n application_root + request.script_root + request.full_path\n )\n\n app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {application_root: app})\n", "path": "CTFd/utils/initialization/__init__.py"}, {"content": "import geoacumen_city\nimport maxminddb\nfrom flask import current_app\n\nIP_ADDR_LOOKUP = maxminddb.open_database(\n current_app.config.get(\"GEOIP_DATABASE_PATH\", geoacumen_city.db_path)\n)\n\n\ndef lookup_ip_address(addr):\n try:\n response = IP_ADDR_LOOKUP.get(addr)\n return response[\"country\"][\"iso_code\"]\n except (KeyError, ValueError, TypeError):\n return None\n\n\ndef lookup_ip_address_city(addr):\n try:\n response = IP_ADDR_LOOKUP.get(addr)\n return response[\"city\"][\"names\"][\"en\"]\n except (KeyError, ValueError, TypeError):\n return None\n", "path": "CTFd/utils/countries/geoip.py"}]} | 3,661 | 520 |
gh_patches_debug_39078 | rasdani/github-patches | git_diff | ranaroussi__yfinance-1297 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Scraper error "TypeError: string indices must be integers" - Yahoo decrypt fail
## Updates
### 2023 January 13
By the time of posting the issue (2023 January 12), the issue only occurred sometimes. The library is now (2023 January 13) completely broken and I am unable to retrieve any stock information.
### 2023 January 14
Fix has been merged to the branch `dev`
## Info about your system:
yfinance version: 0.2.3
Operating system: macOS Monteray 12.0.1
### Snippet that can recreate the error
```
stock = yf.Ticker("^GSPC")
info = stock.info
```
## Error
Message:`TypeError: string indices must be integers`
It seems to be a problem where the scraper is not scraping the correct information, leading to a crash.
### Traceback:
```
Traceback (most recent call last):
File "/home/2022/szhang139/.local/lib/python3.10/site-packages/apscheduler/executors/base_py3.py", line 30, in run_coroutine_job
retval = await job.func(*job.args, **job.kwargs)
File "/home/2022/szhang139/repos/STONK/src/main.py", line 61, in notify
market = get_major_index(f'Market Close - {daytime.today_date()}')
File "/home/2022/szhang139/repos/STONK/src/market_info.py", line 63, in get_major_index
sp500 = get_stock('^GSPC')
File "/home/2022/szhang139/repos/STONK/src/market_info.py", line 41, in get_stock
stock_info = get_stock_info(stock_name)
File "/home/2022/szhang139/repos/STONK/src/market_info.py", line 8, in get_stock_info
info = stock.info
File "/home/2022/szhang139/.local/lib/python3.10/site-packages/yfinance/ticker.py", line 138, in info
return self.get_info()
File "/home/2022/szhang139/.local/lib/python3.10/site-packages/yfinance/base.py", line 894, in get_info
data = self._quote.info
File "/home/2022/szhang139/.local/lib/python3.10/site-packages/yfinance/scrapers/quote.py", line 27, in info
self._scrape(self.proxy)
File "/home/2022/szhang139/.local/lib/python3.10/site-packages/yfinance/scrapers/quote.py", line 58, in _scrape
quote_summary_store = json_data['QuoteSummaryStore']
```
### Frequency
The error occurs in no apparent pattern. Every time it occurs, it seems to persist for some range of time before it recovers back to normal.
--- END ISSUE ---
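Reading the traceback together with the patch further down, the proximate cause appears to be that the store payload handed to the quote scraper is no longer the dict it expects (when Yahoo's new format is not decrypted, the stores stay a plain string), so indexing it with `'QuoteSummaryStore'` blows up. A minimal, hedged reproduction of just that failure mode — the variable names are illustrative, not yfinance's actual locals:
```python
# Indexing a str with a string key is what produces
# "TypeError: string indices must be integers".
json_data = "undecrypted/unexpected payload instead of a decoded store dict"

try:
    quote_summary_store = json_data["QuoteSummaryStore"]
except TypeError as exc:
    print(exc)  # string indices must be integers
```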
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yfinance/data.py`
Content:
```
1 import functools
2 from functools import lru_cache
3
4 import hashlib
5 from base64 import b64decode
6 usePycryptodome = False # slightly faster
7 # usePycryptodome = True
8 if usePycryptodome:
9 from Crypto.Cipher import AES
10 from Crypto.Util.Padding import unpad
11 else:
12 from cryptography.hazmat.primitives import padding
13 from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
14
15 import requests as requests
16 import re
17
18 from frozendict import frozendict
19
20 try:
21 import ujson as json
22 except ImportError:
23 import json as json
24
25 cache_maxsize = 64
26
27
28 def lru_cache_freezeargs(func):
29 """
30 Decorator transforms mutable dictionary and list arguments into immutable types
31 Needed so lru_cache can cache method calls what has dict or list arguments.
32 """
33
34 @functools.wraps(func)
35 def wrapped(*args, **kwargs):
36 args = tuple([frozendict(arg) if isinstance(arg, dict) else arg for arg in args])
37 kwargs = {k: frozendict(v) if isinstance(v, dict) else v for k, v in kwargs.items()}
38 args = tuple([tuple(arg) if isinstance(arg, list) else arg for arg in args])
39 kwargs = {k: tuple(v) if isinstance(v, list) else v for k, v in kwargs.items()}
40 return func(*args, **kwargs)
41
42 # copy over the lru_cache extra methods to this wrapper to be able to access them
43 # after this decorator has been applied
44 wrapped.cache_info = func.cache_info
45 wrapped.cache_clear = func.cache_clear
46 return wrapped
47
48
49 def decrypt_cryptojs_aes(data):
50 encrypted_stores = data['context']['dispatcher']['stores']
51 _cs = data["_cs"]
52 _cr = data["_cr"]
53
54 _cr = b"".join(int.to_bytes(i, length=4, byteorder="big", signed=True) for i in json.loads(_cr)["words"])
55 password = hashlib.pbkdf2_hmac("sha1", _cs.encode("utf8"), _cr, 1, dklen=32).hex()
56
57 encrypted_stores = b64decode(encrypted_stores)
58 assert encrypted_stores[0:8] == b"Salted__"
59 salt = encrypted_stores[8:16]
60 encrypted_stores = encrypted_stores[16:]
61
62 def EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm="md5") -> tuple:
63 """OpenSSL EVP Key Derivation Function
64 Args:
65 password (Union[str, bytes, bytearray]): Password to generate key from.
66 salt (Union[bytes, bytearray]): Salt to use.
67 keySize (int, optional): Output key length in bytes. Defaults to 32.
68 ivSize (int, optional): Output Initialization Vector (IV) length in bytes. Defaults to 16.
69 iterations (int, optional): Number of iterations to perform. Defaults to 1.
70 hashAlgorithm (str, optional): Hash algorithm to use for the KDF. Defaults to 'md5'.
71 Returns:
72 key, iv: Derived key and Initialization Vector (IV) bytes.
73
74 Taken from: https://gist.github.com/rafiibrahim8/0cd0f8c46896cafef6486cb1a50a16d3
75 OpenSSL original code: https://github.com/openssl/openssl/blob/master/crypto/evp/evp_key.c#L78
76 """
77
78 assert iterations > 0, "Iterations can not be less than 1."
79
80 if isinstance(password, str):
81 password = password.encode("utf-8")
82
83 final_length = keySize + ivSize
84 key_iv = b""
85 block = None
86
87 while len(key_iv) < final_length:
88 hasher = hashlib.new(hashAlgorithm)
89 if block:
90 hasher.update(block)
91 hasher.update(password)
92 hasher.update(salt)
93 block = hasher.digest()
94 for _ in range(1, iterations):
95 block = hashlib.new(hashAlgorithm, block).digest()
96 key_iv += block
97
98 key, iv = key_iv[:keySize], key_iv[keySize:final_length]
99 return key, iv
100
101 key, iv = EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm="md5")
102
103 if usePycryptodome:
104 cipher = AES.new(key, AES.MODE_CBC, iv=iv)
105 plaintext = cipher.decrypt(encrypted_stores)
106 plaintext = unpad(plaintext, 16, style="pkcs7")
107 else:
108 cipher = Cipher(algorithms.AES(key), modes.CBC(iv))
109 decryptor = cipher.decryptor()
110 plaintext = decryptor.update(encrypted_stores) + decryptor.finalize()
111 unpadder = padding.PKCS7(128).unpadder()
112 plaintext = unpadder.update(plaintext) + unpadder.finalize()
113 plaintext = plaintext.decode("utf-8")
114
115 decoded_stores = json.loads(plaintext)
116 return decoded_stores
117
118
119 _SCRAPE_URL_ = 'https://finance.yahoo.com/quote'
120
121
122 class TickerData:
123 """
124 Have one place to retrieve data from Yahoo API in order to ease caching and speed up operations
125 """
126 user_agent_headers = {
127 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}
128
129 def __init__(self, ticker: str, session=None):
130 self.ticker = ticker
131 self._session = session or requests
132
133 def get(self, url, user_agent_headers=None, params=None, proxy=None, timeout=30):
134 proxy = self._get_proxy(proxy)
135 response = self._session.get(
136 url=url,
137 params=params,
138 proxies=proxy,
139 timeout=timeout,
140 headers=user_agent_headers or self.user_agent_headers)
141 return response
142
143 @lru_cache_freezeargs
144 @lru_cache(maxsize=cache_maxsize)
145 def cache_get(self, url, user_agent_headers=None, params=None, proxy=None, timeout=30):
146 return self.get(url, user_agent_headers, params, proxy, timeout)
147
148 def _get_proxy(self, proxy):
149 # setup proxy in requests format
150 if proxy is not None:
151 if isinstance(proxy, dict) and "https" in proxy:
152 proxy = proxy["https"]
153 proxy = {"https": proxy}
154 return proxy
155
156 @lru_cache_freezeargs
157 @lru_cache(maxsize=cache_maxsize)
158 def get_json_data_stores(self, sub_page: str = None, proxy=None) -> dict:
159 '''
160 get_json_data_stores returns a python dictionary of the data stores in yahoo finance web page.
161 '''
162 if sub_page:
163 ticker_url = "{}/{}/{}".format(_SCRAPE_URL_, self.ticker, sub_page)
164 else:
165 ticker_url = "{}/{}".format(_SCRAPE_URL_, self.ticker)
166
167 html = self.get(url=ticker_url, proxy=proxy).text
168
169 # The actual json-data for stores is in a javascript assignment in the webpage
170 try:
171 json_str = html.split('root.App.main =')[1].split(
172 '(this)')[0].split(';\n}')[0].strip()
173 except IndexError:
174 # Fetch failed, probably because Yahoo spam triggered
175 return {}
176
177 data = json.loads(json_str)
178
179 if "_cs" in data and "_cr" in data:
180 data = decrypt_cryptojs_aes(data)
181
182 if "context" in data and "dispatcher" in data["context"]:
183 # Keep old code, just in case
184 data = data['context']['dispatcher']['stores']
185
186 # return data
187 new_data = json.dumps(data).replace('{}', 'null')
188 new_data = re.sub(
189 r'{[\'|\"]raw[\'|\"]:(.*?),(.*?)}', r'\1', new_data)
190
191 return json.loads(new_data)
192
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/yfinance/data.py b/yfinance/data.py
--- a/yfinance/data.py
+++ b/yfinance/data.py
@@ -46,13 +46,33 @@
return wrapped
-def decrypt_cryptojs_aes(data):
+def decrypt_cryptojs_aes_stores(data):
encrypted_stores = data['context']['dispatcher']['stores']
- _cs = data["_cs"]
- _cr = data["_cr"]
- _cr = b"".join(int.to_bytes(i, length=4, byteorder="big", signed=True) for i in json.loads(_cr)["words"])
- password = hashlib.pbkdf2_hmac("sha1", _cs.encode("utf8"), _cr, 1, dklen=32).hex()
+ if "_cs" in data and "_cr" in data:
+ _cs = data["_cs"]
+ _cr = data["_cr"]
+ _cr = b"".join(int.to_bytes(i, length=4, byteorder="big", signed=True) for i in json.loads(_cr)["words"])
+ password = hashlib.pbkdf2_hmac("sha1", _cs.encode("utf8"), _cr, 1, dklen=32).hex()
+ else:
+ # Currently assume one extra key in dict, which is password. Print error if
+ # more extra keys detected.
+ new_keys = [k for k in data.keys() if k not in ["context", "plugins"]]
+ l = len(new_keys)
+ if l == 0:
+ return None
+ elif l == 1 and isinstance(data[new_keys[0]], str):
+ password_key = new_keys[0]
+ else:
+ msg = "Yahoo has again changed data format, yfinance now unsure which key(s) is for decryption:"
+ k = new_keys[0]
+ k_str = k if len(k) < 32 else k[:32-3]+"..."
+ msg += f" '{k_str}'->{type(data[k])}"
+ for i in range(1, len(new_keys)):
+ msg += f" , '{k_str}'->{type(data[k])}"
+ raise Exception(msg)
+ password_key = new_keys[0]
+ password = data[password_key]
encrypted_stores = b64decode(encrypted_stores)
assert encrypted_stores[0:8] == b"Salted__"
@@ -98,7 +118,10 @@
key, iv = key_iv[:keySize], key_iv[keySize:final_length]
return key, iv
- key, iv = EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm="md5")
+ try:
+ key, iv = EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm="md5")
+ except:
+ raise Exception("yfinance failed to decrypt Yahoo data response")
if usePycryptodome:
cipher = AES.new(key, AES.MODE_CBC, iv=iv)
@@ -176,15 +199,16 @@
data = json.loads(json_str)
- if "_cs" in data and "_cr" in data:
- data = decrypt_cryptojs_aes(data)
-
- if "context" in data and "dispatcher" in data["context"]:
- # Keep old code, just in case
- data = data['context']['dispatcher']['stores']
+ stores = decrypt_cryptojs_aes_stores(data)
+ if stores is None:
+ # Maybe Yahoo returned old format, not encrypted
+ if "context" in data and "dispatcher" in data["context"]:
+ stores = data['context']['dispatcher']['stores']
+ if stores is None:
+ raise Exception(f"{self.ticker}: Failed to extract data stores from web request")
# return data
- new_data = json.dumps(data).replace('{}', 'null')
+ new_data = json.dumps(stores).replace('{}', 'null')
new_data = re.sub(
r'{[\'|\"]raw[\'|\"]:(.*?),(.*?)}', r'\1', new_data)
| {"golden_diff": "diff --git a/yfinance/data.py b/yfinance/data.py\n--- a/yfinance/data.py\n+++ b/yfinance/data.py\n@@ -46,13 +46,33 @@\n return wrapped\n \n \n-def decrypt_cryptojs_aes(data):\n+def decrypt_cryptojs_aes_stores(data):\n encrypted_stores = data['context']['dispatcher']['stores']\n- _cs = data[\"_cs\"]\n- _cr = data[\"_cr\"]\n \n- _cr = b\"\".join(int.to_bytes(i, length=4, byteorder=\"big\", signed=True) for i in json.loads(_cr)[\"words\"])\n- password = hashlib.pbkdf2_hmac(\"sha1\", _cs.encode(\"utf8\"), _cr, 1, dklen=32).hex()\n+ if \"_cs\" in data and \"_cr\" in data:\n+ _cs = data[\"_cs\"]\n+ _cr = data[\"_cr\"]\n+ _cr = b\"\".join(int.to_bytes(i, length=4, byteorder=\"big\", signed=True) for i in json.loads(_cr)[\"words\"])\n+ password = hashlib.pbkdf2_hmac(\"sha1\", _cs.encode(\"utf8\"), _cr, 1, dklen=32).hex()\n+ else:\n+ # Currently assume one extra key in dict, which is password. Print error if \n+ # more extra keys detected.\n+ new_keys = [k for k in data.keys() if k not in [\"context\", \"plugins\"]]\n+ l = len(new_keys)\n+ if l == 0:\n+ return None\n+ elif l == 1 and isinstance(data[new_keys[0]], str):\n+ password_key = new_keys[0]\n+ else:\n+ msg = \"Yahoo has again changed data format, yfinance now unsure which key(s) is for decryption:\"\n+ k = new_keys[0]\n+ k_str = k if len(k) < 32 else k[:32-3]+\"...\"\n+ msg += f\" '{k_str}'->{type(data[k])}\"\n+ for i in range(1, len(new_keys)):\n+ msg += f\" , '{k_str}'->{type(data[k])}\"\n+ raise Exception(msg)\n+ password_key = new_keys[0]\n+ password = data[password_key]\n \n encrypted_stores = b64decode(encrypted_stores)\n assert encrypted_stores[0:8] == b\"Salted__\"\n@@ -98,7 +118,10 @@\n key, iv = key_iv[:keySize], key_iv[keySize:final_length]\n return key, iv\n \n- key, iv = EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm=\"md5\")\n+ try:\n+ key, iv = EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm=\"md5\")\n+ except:\n+ raise Exception(\"yfinance failed to decrypt Yahoo data response\")\n \n if usePycryptodome:\n cipher = AES.new(key, AES.MODE_CBC, iv=iv)\n@@ -176,15 +199,16 @@\n \n data = json.loads(json_str)\n \n- if \"_cs\" in data and \"_cr\" in data:\n- data = decrypt_cryptojs_aes(data)\n-\n- if \"context\" in data and \"dispatcher\" in data[\"context\"]:\n- # Keep old code, just in case\n- data = data['context']['dispatcher']['stores']\n+ stores = decrypt_cryptojs_aes_stores(data)\n+ if stores is None:\n+ # Maybe Yahoo returned old format, not encrypted\n+ if \"context\" in data and \"dispatcher\" in data[\"context\"]:\n+ stores = data['context']['dispatcher']['stores']\n+ if stores is None:\n+ raise Exception(f\"{self.ticker}: Failed to extract data stores from web request\")\n \n # return data\n- new_data = json.dumps(data).replace('{}', 'null')\n+ new_data = json.dumps(stores).replace('{}', 'null')\n new_data = re.sub(\n r'{[\\'|\\\"]raw[\\'|\\\"]:(.*?),(.*?)}', r'\\1', new_data)\n", "issue": "Scraper error \"TypeError: string indices must be integers\" - Yahoo decrypt fail\n## Updates\r\n### 2023 January 13\r\nBy the time of posting the issue (2023 January 12), the issue only occured sometimes. 
The library is now (2023 January 13) completely broken and I am unable to retrieve any stock informatio\r\n### 2023 January 14\r\nFix has been merged to the branch `dev`\r\n\r\n## Info about your system:\r\nyfinance version: 0.2.3\r\nOperating system: macOS Monteray 12.0.1\r\n### Snippet that can recreate the error\r\n```\r\nstock = yf.Ticker(\"^GSPC\")\r\ninfo = stock.info\r\n```\r\n## Error\r\nMessage:`TypeError: string indices must be integers`\r\nIt seems to be a problem where the scraper is not scraping the correct information, leading to a crash.\r\n### Traceback:\r\n```\r\nTraceback (most recent call last):\r\n File \"/home/2022/szhang139/.local/lib/python3.10/site-packages/apscheduler/executors/base_py3.py\", line 30, in run_coroutine_job\r\n retval = await job.func(*job.args, **job.kwargs)\r\n File \"/home/2022/szhang139/repos/STONK/src/main.py\", line 61, in notify\r\n market = get_major_index(f'Market Close - {daytime.today_date()}')\r\n File \"/home/2022/szhang139/repos/STONK/src/market_info.py\", line 63, in get_major_index\r\n sp500 = get_stock('^GSPC')\r\n File \"/home/2022/szhang139/repos/STONK/src/market_info.py\", line 41, in get_stock\r\n stock_info = get_stock_info(stock_name)\r\n File \"/home/2022/szhang139/repos/STONK/src/market_info.py\", line 8, in get_stock_info\r\n info = stock.info\r\n File \"/home/2022/szhang139/.local/lib/python3.10/site-packages/yfinance/ticker.py\", line 138, in info\r\n return self.get_info()\r\n File \"/home/2022/szhang139/.local/lib/python3.10/site-packages/yfinance/base.py\", line 894, in get_info\r\n data = self._quote.info\r\n File \"/home/2022/szhang139/.local/lib/python3.10/site-packages/yfinance/scrapers/quote.py\", line 27, in info\r\n self._scrape(self.proxy)\r\n File \"/home/2022/szhang139/.local/lib/python3.10/site-packages/yfinance/scrapers/quote.py\", line 58, in _scrape\r\n quote_summary_store = json_data['QuoteSummaryStore']\r\n```\r\n### Frequency\r\nThe error occurs in no apparent pattern. Every time it occurs, it seem to persist for some range of time before it recovers back to normal.\r\nn. 
\r\n\n", "before_files": [{"content": "import functools\nfrom functools import lru_cache\n\nimport hashlib\nfrom base64 import b64decode\nusePycryptodome = False # slightly faster\n# usePycryptodome = True\nif usePycryptodome:\n from Crypto.Cipher import AES\n from Crypto.Util.Padding import unpad\nelse:\n from cryptography.hazmat.primitives import padding\n from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n\nimport requests as requests\nimport re\n\nfrom frozendict import frozendict\n\ntry:\n import ujson as json\nexcept ImportError:\n import json as json\n\ncache_maxsize = 64\n\n\ndef lru_cache_freezeargs(func):\n \"\"\"\n Decorator transforms mutable dictionary and list arguments into immutable types\n Needed so lru_cache can cache method calls what has dict or list arguments.\n \"\"\"\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n args = tuple([frozendict(arg) if isinstance(arg, dict) else arg for arg in args])\n kwargs = {k: frozendict(v) if isinstance(v, dict) else v for k, v in kwargs.items()}\n args = tuple([tuple(arg) if isinstance(arg, list) else arg for arg in args])\n kwargs = {k: tuple(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n return func(*args, **kwargs)\n\n # copy over the lru_cache extra methods to this wrapper to be able to access them\n # after this decorator has been applied\n wrapped.cache_info = func.cache_info\n wrapped.cache_clear = func.cache_clear\n return wrapped\n\n\ndef decrypt_cryptojs_aes(data):\n encrypted_stores = data['context']['dispatcher']['stores']\n _cs = data[\"_cs\"]\n _cr = data[\"_cr\"]\n\n _cr = b\"\".join(int.to_bytes(i, length=4, byteorder=\"big\", signed=True) for i in json.loads(_cr)[\"words\"])\n password = hashlib.pbkdf2_hmac(\"sha1\", _cs.encode(\"utf8\"), _cr, 1, dklen=32).hex()\n\n encrypted_stores = b64decode(encrypted_stores)\n assert encrypted_stores[0:8] == b\"Salted__\"\n salt = encrypted_stores[8:16]\n encrypted_stores = encrypted_stores[16:]\n\n def EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm=\"md5\") -> tuple:\n \"\"\"OpenSSL EVP Key Derivation Function\n Args:\n password (Union[str, bytes, bytearray]): Password to generate key from.\n salt (Union[bytes, bytearray]): Salt to use.\n keySize (int, optional): Output key length in bytes. Defaults to 32.\n ivSize (int, optional): Output Initialization Vector (IV) length in bytes. Defaults to 16.\n iterations (int, optional): Number of iterations to perform. Defaults to 1.\n hashAlgorithm (str, optional): Hash algorithm to use for the KDF. 
Defaults to 'md5'.\n Returns:\n key, iv: Derived key and Initialization Vector (IV) bytes.\n\n Taken from: https://gist.github.com/rafiibrahim8/0cd0f8c46896cafef6486cb1a50a16d3\n OpenSSL original code: https://github.com/openssl/openssl/blob/master/crypto/evp/evp_key.c#L78\n \"\"\"\n\n assert iterations > 0, \"Iterations can not be less than 1.\"\n\n if isinstance(password, str):\n password = password.encode(\"utf-8\")\n\n final_length = keySize + ivSize\n key_iv = b\"\"\n block = None\n\n while len(key_iv) < final_length:\n hasher = hashlib.new(hashAlgorithm)\n if block:\n hasher.update(block)\n hasher.update(password)\n hasher.update(salt)\n block = hasher.digest()\n for _ in range(1, iterations):\n block = hashlib.new(hashAlgorithm, block).digest()\n key_iv += block\n\n key, iv = key_iv[:keySize], key_iv[keySize:final_length]\n return key, iv\n\n key, iv = EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm=\"md5\")\n\n if usePycryptodome:\n cipher = AES.new(key, AES.MODE_CBC, iv=iv)\n plaintext = cipher.decrypt(encrypted_stores)\n plaintext = unpad(plaintext, 16, style=\"pkcs7\")\n else:\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv))\n decryptor = cipher.decryptor()\n plaintext = decryptor.update(encrypted_stores) + decryptor.finalize()\n unpadder = padding.PKCS7(128).unpadder()\n plaintext = unpadder.update(plaintext) + unpadder.finalize()\n plaintext = plaintext.decode(\"utf-8\")\n\n decoded_stores = json.loads(plaintext)\n return decoded_stores\n\n\n_SCRAPE_URL_ = 'https://finance.yahoo.com/quote'\n\n\nclass TickerData:\n \"\"\"\n Have one place to retrieve data from Yahoo API in order to ease caching and speed up operations\n \"\"\"\n user_agent_headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n def __init__(self, ticker: str, session=None):\n self.ticker = ticker\n self._session = session or requests\n\n def get(self, url, user_agent_headers=None, params=None, proxy=None, timeout=30):\n proxy = self._get_proxy(proxy)\n response = self._session.get(\n url=url,\n params=params,\n proxies=proxy,\n timeout=timeout,\n headers=user_agent_headers or self.user_agent_headers)\n return response\n\n @lru_cache_freezeargs\n @lru_cache(maxsize=cache_maxsize)\n def cache_get(self, url, user_agent_headers=None, params=None, proxy=None, timeout=30):\n return self.get(url, user_agent_headers, params, proxy, timeout)\n\n def _get_proxy(self, proxy):\n # setup proxy in requests format\n if proxy is not None:\n if isinstance(proxy, dict) and \"https\" in proxy:\n proxy = proxy[\"https\"]\n proxy = {\"https\": proxy}\n return proxy\n\n @lru_cache_freezeargs\n @lru_cache(maxsize=cache_maxsize)\n def get_json_data_stores(self, sub_page: str = None, proxy=None) -> dict:\n '''\n get_json_data_stores returns a python dictionary of the data stores in yahoo finance web page.\n '''\n if sub_page:\n ticker_url = \"{}/{}/{}\".format(_SCRAPE_URL_, self.ticker, sub_page)\n else:\n ticker_url = \"{}/{}\".format(_SCRAPE_URL_, self.ticker)\n\n html = self.get(url=ticker_url, proxy=proxy).text\n\n # The actual json-data for stores is in a javascript assignment in the webpage\n try:\n json_str = html.split('root.App.main =')[1].split(\n '(this)')[0].split(';\\n}')[0].strip()\n except IndexError:\n # Fetch failed, probably because Yahoo spam triggered\n return {}\n\n data = json.loads(json_str)\n\n if \"_cs\" in data and \"_cr\" in data:\n data = decrypt_cryptojs_aes(data)\n\n if 
\"context\" in data and \"dispatcher\" in data[\"context\"]:\n # Keep old code, just in case\n data = data['context']['dispatcher']['stores']\n\n # return data\n new_data = json.dumps(data).replace('{}', 'null')\n new_data = re.sub(\n r'{[\\'|\\\"]raw[\\'|\\\"]:(.*?),(.*?)}', r'\\1', new_data)\n\n return json.loads(new_data)\n", "path": "yfinance/data.py"}], "after_files": [{"content": "import functools\nfrom functools import lru_cache\n\nimport hashlib\nfrom base64 import b64decode\nusePycryptodome = False # slightly faster\n# usePycryptodome = True\nif usePycryptodome:\n from Crypto.Cipher import AES\n from Crypto.Util.Padding import unpad\nelse:\n from cryptography.hazmat.primitives import padding\n from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n\nimport requests as requests\nimport re\n\nfrom frozendict import frozendict\n\ntry:\n import ujson as json\nexcept ImportError:\n import json as json\n\ncache_maxsize = 64\n\n\ndef lru_cache_freezeargs(func):\n \"\"\"\n Decorator transforms mutable dictionary and list arguments into immutable types\n Needed so lru_cache can cache method calls what has dict or list arguments.\n \"\"\"\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n args = tuple([frozendict(arg) if isinstance(arg, dict) else arg for arg in args])\n kwargs = {k: frozendict(v) if isinstance(v, dict) else v for k, v in kwargs.items()}\n args = tuple([tuple(arg) if isinstance(arg, list) else arg for arg in args])\n kwargs = {k: tuple(v) if isinstance(v, list) else v for k, v in kwargs.items()}\n return func(*args, **kwargs)\n\n # copy over the lru_cache extra methods to this wrapper to be able to access them\n # after this decorator has been applied\n wrapped.cache_info = func.cache_info\n wrapped.cache_clear = func.cache_clear\n return wrapped\n\n\ndef decrypt_cryptojs_aes_stores(data):\n encrypted_stores = data['context']['dispatcher']['stores']\n\n if \"_cs\" in data and \"_cr\" in data:\n _cs = data[\"_cs\"]\n _cr = data[\"_cr\"]\n _cr = b\"\".join(int.to_bytes(i, length=4, byteorder=\"big\", signed=True) for i in json.loads(_cr)[\"words\"])\n password = hashlib.pbkdf2_hmac(\"sha1\", _cs.encode(\"utf8\"), _cr, 1, dklen=32).hex()\n else:\n # Currently assume one extra key in dict, which is password. Print error if \n # more extra keys detected.\n new_keys = [k for k in data.keys() if k not in [\"context\", \"plugins\"]]\n l = len(new_keys)\n if l == 0:\n return None\n elif l == 1 and isinstance(data[new_keys[0]], str):\n password_key = new_keys[0]\n else:\n msg = \"Yahoo has again changed data format, yfinance now unsure which key(s) is for decryption:\"\n k = new_keys[0]\n k_str = k if len(k) < 32 else k[:32-3]+\"...\"\n msg += f\" '{k_str}'->{type(data[k])}\"\n for i in range(1, len(new_keys)):\n msg += f\" , '{k_str}'->{type(data[k])}\"\n raise Exception(msg)\n password_key = new_keys[0]\n password = data[password_key]\n\n encrypted_stores = b64decode(encrypted_stores)\n assert encrypted_stores[0:8] == b\"Salted__\"\n salt = encrypted_stores[8:16]\n encrypted_stores = encrypted_stores[16:]\n\n def EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm=\"md5\") -> tuple:\n \"\"\"OpenSSL EVP Key Derivation Function\n Args:\n password (Union[str, bytes, bytearray]): Password to generate key from.\n salt (Union[bytes, bytearray]): Salt to use.\n keySize (int, optional): Output key length in bytes. Defaults to 32.\n ivSize (int, optional): Output Initialization Vector (IV) length in bytes. 
Defaults to 16.\n iterations (int, optional): Number of iterations to perform. Defaults to 1.\n hashAlgorithm (str, optional): Hash algorithm to use for the KDF. Defaults to 'md5'.\n Returns:\n key, iv: Derived key and Initialization Vector (IV) bytes.\n\n Taken from: https://gist.github.com/rafiibrahim8/0cd0f8c46896cafef6486cb1a50a16d3\n OpenSSL original code: https://github.com/openssl/openssl/blob/master/crypto/evp/evp_key.c#L78\n \"\"\"\n\n assert iterations > 0, \"Iterations can not be less than 1.\"\n\n if isinstance(password, str):\n password = password.encode(\"utf-8\")\n\n final_length = keySize + ivSize\n key_iv = b\"\"\n block = None\n\n while len(key_iv) < final_length:\n hasher = hashlib.new(hashAlgorithm)\n if block:\n hasher.update(block)\n hasher.update(password)\n hasher.update(salt)\n block = hasher.digest()\n for _ in range(1, iterations):\n block = hashlib.new(hashAlgorithm, block).digest()\n key_iv += block\n\n key, iv = key_iv[:keySize], key_iv[keySize:final_length]\n return key, iv\n\n try:\n key, iv = EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm=\"md5\")\n except:\n raise Exception(\"yfinance failed to decrypt Yahoo data response\")\n\n if usePycryptodome:\n cipher = AES.new(key, AES.MODE_CBC, iv=iv)\n plaintext = cipher.decrypt(encrypted_stores)\n plaintext = unpad(plaintext, 16, style=\"pkcs7\")\n else:\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv))\n decryptor = cipher.decryptor()\n plaintext = decryptor.update(encrypted_stores) + decryptor.finalize()\n unpadder = padding.PKCS7(128).unpadder()\n plaintext = unpadder.update(plaintext) + unpadder.finalize()\n plaintext = plaintext.decode(\"utf-8\")\n\n decoded_stores = json.loads(plaintext)\n return decoded_stores\n\n\n_SCRAPE_URL_ = 'https://finance.yahoo.com/quote'\n\n\nclass TickerData:\n \"\"\"\n Have one place to retrieve data from Yahoo API in order to ease caching and speed up operations\n \"\"\"\n user_agent_headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n def __init__(self, ticker: str, session=None):\n self.ticker = ticker\n self._session = session or requests\n\n def get(self, url, user_agent_headers=None, params=None, proxy=None, timeout=30):\n proxy = self._get_proxy(proxy)\n response = self._session.get(\n url=url,\n params=params,\n proxies=proxy,\n timeout=timeout,\n headers=user_agent_headers or self.user_agent_headers)\n return response\n\n @lru_cache_freezeargs\n @lru_cache(maxsize=cache_maxsize)\n def cache_get(self, url, user_agent_headers=None, params=None, proxy=None, timeout=30):\n return self.get(url, user_agent_headers, params, proxy, timeout)\n\n def _get_proxy(self, proxy):\n # setup proxy in requests format\n if proxy is not None:\n if isinstance(proxy, dict) and \"https\" in proxy:\n proxy = proxy[\"https\"]\n proxy = {\"https\": proxy}\n return proxy\n\n @lru_cache_freezeargs\n @lru_cache(maxsize=cache_maxsize)\n def get_json_data_stores(self, sub_page: str = None, proxy=None) -> dict:\n '''\n get_json_data_stores returns a python dictionary of the data stores in yahoo finance web page.\n '''\n if sub_page:\n ticker_url = \"{}/{}/{}\".format(_SCRAPE_URL_, self.ticker, sub_page)\n else:\n ticker_url = \"{}/{}\".format(_SCRAPE_URL_, self.ticker)\n\n html = self.get(url=ticker_url, proxy=proxy).text\n\n # The actual json-data for stores is in a javascript assignment in the webpage\n try:\n json_str = html.split('root.App.main =')[1].split(\n 
'(this)')[0].split(';\\n}')[0].strip()\n except IndexError:\n # Fetch failed, probably because Yahoo spam triggered\n return {}\n\n data = json.loads(json_str)\n\n stores = decrypt_cryptojs_aes_stores(data)\n if stores is None:\n # Maybe Yahoo returned old format, not encrypted\n if \"context\" in data and \"dispatcher\" in data[\"context\"]:\n stores = data['context']['dispatcher']['stores']\n if stores is None:\n raise Exception(f\"{self.ticker}: Failed to extract data stores from web request\")\n\n # return data\n new_data = json.dumps(stores).replace('{}', 'null')\n new_data = re.sub(\n r'{[\\'|\\\"]raw[\\'|\\\"]:(.*?),(.*?)}', r'\\1', new_data)\n\n return json.loads(new_data)\n", "path": "yfinance/data.py"}]} | 3,249 | 941 |
gh_patches_debug_2937 | rasdani/github-patches | git_diff | openai__gym-1708 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug in PixelObservationWrapper
Error log
```
env = PixelObservationWrapper(env, pixels_only=True)
File "/home/tsan/Desktop/gym/gym/wrappers/pixel_observation.py", line 89, in __init__
pixels = self.env.render(**render_kwargs)
File "/home/tsan/Desktop/gym/gym/core.py", line 233, in render
return self.env.render(mode, **kwargs)
TypeError: render() got an unexpected keyword argument 'pixels'
```
Can be reproduced by running
```
import gym
from gym.wrappers.pixel_observation import PixelObservationWrapper # pylint: disable=E0401
env = gym.make('Acrobot-v1')
env.reset()
env = PixelObservationWrapper(env, pixels_only=True)
env.step(0)
```
--- END ISSUE ---
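For context on the traceback above: after the `setdefault` loop in the wrapper's `__init__`, `render_kwargs` is keyed by pixel key (e.g. `{'pixels': {'mode': 'rgb_array'}}`), so unpacking the whole dict forwards a keyword literally named `pixels` to `render()`. A minimal sketch with a stand-in `render` (not the real gym API), assuming the default `pixel_keys=('pixels',)`:
```python
# Stand-in for env.render; the real signature differs, this only mirrors
# the keyword handling that triggers the error.
def render(mode="human"):
    return mode

# Shape of render_kwargs after PixelObservationWrapper.__init__ fills defaults:
render_kwargs = {"pixels": {"mode": "rgb_array"}}

print(render(**render_kwargs["pixels"]))  # indexed by pixel key -> render(mode='rgb_array')
try:
    render(**render_kwargs)  # whole dict -> render(pixels={...})
except TypeError as exc:
    print(exc)  # render() got an unexpected keyword argument 'pixels'
```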
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gym/wrappers/pixel_observation.py`
Content:
```
1 """An observation wrapper that augments observations by pixel values."""
2
3 import collections
4 import copy
5
6 import numpy as np
7
8 from gym import spaces
9 from gym import ObservationWrapper
10
11 STATE_KEY = 'state'
12
13
14 class PixelObservationWrapper(ObservationWrapper):
15 """Augment observations by pixel values."""
16
17 def __init__(self,
18 env,
19 pixels_only=True,
20 render_kwargs=None,
21 pixel_keys=('pixels', )):
22 """Initializes a new pixel Wrapper.
23
24 Args:
25 env: The environment to wrap.
26 pixels_only: If `True` (default), the original observation returned
27 by the wrapped environment will be discarded, and a dictionary
28 observation will only include pixels. If `False`, the
29 observation dictionary will contain both the original
30 observations and the pixel observations.
31 render_kwargs: Optional `dict` containing keyword arguments passed
32 to the `self.render` method.
33 pixel_keys: Optional custom string specifying the pixel
34 observation's key in the `OrderedDict` of observations.
35 Defaults to 'pixels'.
36
37 Raises:
38 ValueError: If `env`'s observation spec is not compatible with the
39 wrapper. Supported formats are a single array, or a dict of
40 arrays.
41 ValueError: If `env`'s observation already contains any of the
42 specified `pixel_keys`.
43 """
44
45 super(PixelObservationWrapper, self).__init__(env)
46
47 if render_kwargs is None:
48 render_kwargs = {}
49
50 for key in pixel_keys:
51 render_kwargs.setdefault(key, {})
52
53 render_mode = render_kwargs[key].pop('mode', 'rgb_array')
54 assert render_mode == 'rgb_array', render_mode
55 render_kwargs[key]['mode'] = 'rgb_array'
56
57 wrapped_observation_space = env.observation_space
58
59 if isinstance(wrapped_observation_space, spaces.Box):
60 self._observation_is_dict = False
61 invalid_keys = set([STATE_KEY])
62 elif isinstance(wrapped_observation_space,
63 (spaces.Dict, collections.MutableMapping)):
64 self._observation_is_dict = True
65 invalid_keys = set(wrapped_observation_space.spaces.keys())
66 else:
67 raise ValueError("Unsupported observation space structure.")
68
69 if not pixels_only:
70 # Make sure that now keys in the `pixel_keys` overlap with
71 # `observation_keys`
72 overlapping_keys = set(pixel_keys) & set(invalid_keys)
73 if overlapping_keys:
74 raise ValueError("Duplicate or reserved pixel keys {!r}."
75 .format(overlapping_keys))
76
77 if pixels_only:
78 self.observation_space = spaces.Dict()
79 elif self._observation_is_dict:
80 self.observation_space = copy.deepcopy(wrapped_observation_space)
81 else:
82 self.observation_space = spaces.Dict()
83 self.observation_space.spaces[STATE_KEY] = wrapped_observation_space
84
85 # Extend observation space with pixels.
86
87 pixels_spaces = {}
88 for pixel_key in pixel_keys:
89 pixels = self.env.render(**render_kwargs)
90
91 if np.issubdtype(pixels.dtype, np.integer):
92 low, high = (0, 255)
93 elif np.issubdtype(pixels.dtype, np.float):
94 low, high = (-float('inf'), float('inf'))
95 else:
96 raise TypeError(pixels.dtype)
97
98 pixels_space = spaces.Box(
99 shape=pixels.shape, low=low, high=high, dtype=pixels.dtype)
100 pixels_spaces[pixel_key] = pixels_space
101
102 self.observation_space.spaces.update(pixels_spaces)
103
104 self._env = env
105 self._pixels_only = pixels_only
106 self._render_kwargs = render_kwargs
107 self._pixel_keys = pixel_keys
108
109 def observation(self, observation):
110 pixel_observation = self._add_pixel_observation(observation)
111 return pixel_observation
112
113 def _add_pixel_observation(self, observation):
114 if self._pixels_only:
115 observation = collections.OrderedDict()
116 elif self._observation_is_dict:
117 observation = type(observation)(observation)
118 else:
119 observation = collections.OrderedDict()
120 observation[STATE_KEY] = observation
121
122 pixel_observations = {
123 pixel_key: self.env.render(**self._render_kwargs[pixel_key])
124 for pixel_key in self._pixel_keys
125 }
126
127 observation.update(pixel_observations)
128
129 return observation
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gym/wrappers/pixel_observation.py b/gym/wrappers/pixel_observation.py
--- a/gym/wrappers/pixel_observation.py
+++ b/gym/wrappers/pixel_observation.py
@@ -86,7 +86,7 @@
pixels_spaces = {}
for pixel_key in pixel_keys:
- pixels = self.env.render(**render_kwargs)
+ pixels = self.env.render(**render_kwargs[pixel_key])
if np.issubdtype(pixels.dtype, np.integer):
low, high = (0, 255)
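The one-line fix indexes `render_kwargs` by pixel key before unpacking, so each key can carry its own render settings. A hypothetical usage sketch of the patched wrapper, extending the reproduction from the issue:
```python
import gym
from gym.wrappers.pixel_observation import PixelObservationWrapper

env = gym.make("Acrobot-v1")
env.reset()
# one kwargs dict per pixel key; 'pixels' is the default key
env = PixelObservationWrapper(
    env,
    pixels_only=True,
    render_kwargs={"pixels": {"mode": "rgb_array"}},
    pixel_keys=("pixels",),
)
env.step(0)
```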
| {"golden_diff": "diff --git a/gym/wrappers/pixel_observation.py b/gym/wrappers/pixel_observation.py\n--- a/gym/wrappers/pixel_observation.py\n+++ b/gym/wrappers/pixel_observation.py\n@@ -86,7 +86,7 @@\n \n pixels_spaces = {}\n for pixel_key in pixel_keys:\n- pixels = self.env.render(**render_kwargs)\n+ pixels = self.env.render(**render_kwargs[pixel_key])\n \n if np.issubdtype(pixels.dtype, np.integer):\n low, high = (0, 255)\n", "issue": "Bug in PixelObservationWrapper \nError log\r\n```\r\n env = PixelObservationWrapper(env, pixels_only=True)\r\n File \"/home/tsan/Desktop/gym/gym/wrappers/pixel_observation.py\", line 89, in __init__\r\n pixels = self.env.render(**render_kwargs)\r\n File \"/home/tsan/Desktop/gym/gym/core.py\", line 233, in render\r\n return self.env.render(mode, **kwargs)\r\nTypeError: render() got an unexpected keyword argument 'pixels'\r\n```\r\n\r\nCan be reproduced by running\r\n```\r\nimport gym\r\nfrom gym.wrappers.pixel_observation import PixelObservationWrapper # pylint: disable=E0401\r\n\r\nenv = gym.make('Acrobot-v1')\r\nenv.reset()\r\nenv = PixelObservationWrapper(env, pixels_only=True)\r\nenv.step(0)\r\n```\n", "before_files": [{"content": "\"\"\"An observation wrapper that augments observations by pixel values.\"\"\"\n\nimport collections\nimport copy\n\nimport numpy as np\n\nfrom gym import spaces\nfrom gym import ObservationWrapper\n\nSTATE_KEY = 'state'\n\n\nclass PixelObservationWrapper(ObservationWrapper):\n \"\"\"Augment observations by pixel values.\"\"\"\n\n def __init__(self,\n env,\n pixels_only=True,\n render_kwargs=None,\n pixel_keys=('pixels', )):\n \"\"\"Initializes a new pixel Wrapper.\n\n Args:\n env: The environment to wrap.\n pixels_only: If `True` (default), the original observation returned\n by the wrapped environment will be discarded, and a dictionary\n observation will only include pixels. If `False`, the\n observation dictionary will contain both the original\n observations and the pixel observations.\n render_kwargs: Optional `dict` containing keyword arguments passed\n to the `self.render` method.\n pixel_keys: Optional custom string specifying the pixel\n observation's key in the `OrderedDict` of observations.\n Defaults to 'pixels'.\n\n Raises:\n ValueError: If `env`'s observation spec is not compatible with the\n wrapper. 
Supported formats are a single array, or a dict of\n arrays.\n ValueError: If `env`'s observation already contains any of the\n specified `pixel_keys`.\n \"\"\"\n\n super(PixelObservationWrapper, self).__init__(env)\n\n if render_kwargs is None:\n render_kwargs = {}\n\n for key in pixel_keys:\n render_kwargs.setdefault(key, {})\n\n render_mode = render_kwargs[key].pop('mode', 'rgb_array')\n assert render_mode == 'rgb_array', render_mode\n render_kwargs[key]['mode'] = 'rgb_array'\n\n wrapped_observation_space = env.observation_space\n\n if isinstance(wrapped_observation_space, spaces.Box):\n self._observation_is_dict = False\n invalid_keys = set([STATE_KEY])\n elif isinstance(wrapped_observation_space,\n (spaces.Dict, collections.MutableMapping)):\n self._observation_is_dict = True\n invalid_keys = set(wrapped_observation_space.spaces.keys())\n else:\n raise ValueError(\"Unsupported observation space structure.\")\n\n if not pixels_only:\n # Make sure that now keys in the `pixel_keys` overlap with\n # `observation_keys`\n overlapping_keys = set(pixel_keys) & set(invalid_keys)\n if overlapping_keys:\n raise ValueError(\"Duplicate or reserved pixel keys {!r}.\"\n .format(overlapping_keys))\n\n if pixels_only:\n self.observation_space = spaces.Dict()\n elif self._observation_is_dict:\n self.observation_space = copy.deepcopy(wrapped_observation_space)\n else:\n self.observation_space = spaces.Dict()\n self.observation_space.spaces[STATE_KEY] = wrapped_observation_space\n\n # Extend observation space with pixels.\n\n pixels_spaces = {}\n for pixel_key in pixel_keys:\n pixels = self.env.render(**render_kwargs)\n\n if np.issubdtype(pixels.dtype, np.integer):\n low, high = (0, 255)\n elif np.issubdtype(pixels.dtype, np.float):\n low, high = (-float('inf'), float('inf'))\n else:\n raise TypeError(pixels.dtype)\n\n pixels_space = spaces.Box(\n shape=pixels.shape, low=low, high=high, dtype=pixels.dtype)\n pixels_spaces[pixel_key] = pixels_space\n\n self.observation_space.spaces.update(pixels_spaces)\n\n self._env = env\n self._pixels_only = pixels_only\n self._render_kwargs = render_kwargs\n self._pixel_keys = pixel_keys\n\n def observation(self, observation):\n pixel_observation = self._add_pixel_observation(observation)\n return pixel_observation\n\n def _add_pixel_observation(self, observation):\n if self._pixels_only:\n observation = collections.OrderedDict()\n elif self._observation_is_dict:\n observation = type(observation)(observation)\n else:\n observation = collections.OrderedDict()\n observation[STATE_KEY] = observation\n\n pixel_observations = {\n pixel_key: self.env.render(**self._render_kwargs[pixel_key])\n for pixel_key in self._pixel_keys\n }\n\n observation.update(pixel_observations)\n\n return observation\n", "path": "gym/wrappers/pixel_observation.py"}], "after_files": [{"content": "\"\"\"An observation wrapper that augments observations by pixel values.\"\"\"\n\nimport collections\nimport copy\n\nimport numpy as np\n\nfrom gym import spaces\nfrom gym import ObservationWrapper\n\nSTATE_KEY = 'state'\n\n\nclass PixelObservationWrapper(ObservationWrapper):\n \"\"\"Augment observations by pixel values.\"\"\"\n\n def __init__(self,\n env,\n pixels_only=True,\n render_kwargs=None,\n pixel_keys=('pixels', )):\n \"\"\"Initializes a new pixel Wrapper.\n\n Args:\n env: The environment to wrap.\n pixels_only: If `True` (default), the original observation returned\n by the wrapped environment will be discarded, and a dictionary\n observation will only include pixels. 
If `False`, the\n observation dictionary will contain both the original\n observations and the pixel observations.\n render_kwargs: Optional `dict` containing keyword arguments passed\n to the `self.render` method.\n pixel_keys: Optional custom string specifying the pixel\n observation's key in the `OrderedDict` of observations.\n Defaults to 'pixels'.\n\n Raises:\n ValueError: If `env`'s observation spec is not compatible with the\n wrapper. Supported formats are a single array, or a dict of\n arrays.\n ValueError: If `env`'s observation already contains any of the\n specified `pixel_keys`.\n \"\"\"\n\n super(PixelObservationWrapper, self).__init__(env)\n\n if render_kwargs is None:\n render_kwargs = {}\n\n for key in pixel_keys:\n render_kwargs.setdefault(key, {})\n\n render_mode = render_kwargs[key].pop('mode', 'rgb_array')\n assert render_mode == 'rgb_array', render_mode\n render_kwargs[key]['mode'] = 'rgb_array'\n\n wrapped_observation_space = env.observation_space\n\n if isinstance(wrapped_observation_space, spaces.Box):\n self._observation_is_dict = False\n invalid_keys = set([STATE_KEY])\n elif isinstance(wrapped_observation_space,\n (spaces.Dict, collections.MutableMapping)):\n self._observation_is_dict = True\n invalid_keys = set(wrapped_observation_space.spaces.keys())\n else:\n raise ValueError(\"Unsupported observation space structure.\")\n\n if not pixels_only:\n # Make sure that now keys in the `pixel_keys` overlap with\n # `observation_keys`\n overlapping_keys = set(pixel_keys) & set(invalid_keys)\n if overlapping_keys:\n raise ValueError(\"Duplicate or reserved pixel keys {!r}.\"\n .format(overlapping_keys))\n\n if pixels_only:\n self.observation_space = spaces.Dict()\n elif self._observation_is_dict:\n self.observation_space = copy.deepcopy(wrapped_observation_space)\n else:\n self.observation_space = spaces.Dict()\n self.observation_space.spaces[STATE_KEY] = wrapped_observation_space\n\n # Extend observation space with pixels.\n\n pixels_spaces = {}\n for pixel_key in pixel_keys:\n pixels = self.env.render(**render_kwargs[pixel_key])\n\n if np.issubdtype(pixels.dtype, np.integer):\n low, high = (0, 255)\n elif np.issubdtype(pixels.dtype, np.float):\n low, high = (-float('inf'), float('inf'))\n else:\n raise TypeError(pixels.dtype)\n\n pixels_space = spaces.Box(\n shape=pixels.shape, low=low, high=high, dtype=pixels.dtype)\n pixels_spaces[pixel_key] = pixels_space\n\n self.observation_space.spaces.update(pixels_spaces)\n\n self._env = env\n self._pixels_only = pixels_only\n self._render_kwargs = render_kwargs\n self._pixel_keys = pixel_keys\n\n def observation(self, observation):\n pixel_observation = self._add_pixel_observation(observation)\n return pixel_observation\n\n def _add_pixel_observation(self, observation):\n if self._pixels_only:\n observation = collections.OrderedDict()\n elif self._observation_is_dict:\n observation = type(observation)(observation)\n else:\n observation = collections.OrderedDict()\n observation[STATE_KEY] = observation\n\n pixel_observations = {\n pixel_key: self.env.render(**self._render_kwargs[pixel_key])\n for pixel_key in self._pixel_keys\n }\n\n observation.update(pixel_observations)\n\n return observation\n", "path": "gym/wrappers/pixel_observation.py"}]} | 1,651 | 131 |
gh_patches_debug_29984 | rasdani/github-patches | git_diff | iterative__dvc-494 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dvc repro reproduces directories all the time
The directory was just created by `dvc run`, yet the next `dvc repro` reproduces it one more time.
```
$ dvc repro -v dir1.dvc
Data /home/ubuntu/src/myrepo_test/data/file.txt with cache /home/ubuntu/src/myrepo_test/.dvc/cache/2db7e560e41eb16f6a78cb13d2645836 didn't change
/home/ubuntu/src/myrepo_test/data/file.txt.dvc didn't change
Data /home/ubuntu/src/myrepo_test/dir1 with cache /home/ubuntu/src/myrepo_test/.dvc/cache/3908923f1bc06b1f1279aad916f5cdbf changed
/home/ubuntu/src/myrepo_test/dir1.dvc changed
Data /home/ubuntu/src/myrepo_test/dir1 with cache /home/ubuntu/src/myrepo_test/.dvc/cache/3908923f1bc06b1f1279aad916f5cdbf changed
/home/ubuntu/src/myrepo_test/dir1.dvc changed
Removing '/home/ubuntu/src/myrepo_test/dir1/.head'
Removing '/home/ubuntu/src/myrepo_test/dir1/.tail'
Reproducing 'dir1.dvc':
./headdir.sh data/file.txt dir1
...
```
Details:
```
# headdir.sh
f=`basename $2`
head -n 3 $1 > $2/${f}.head
tail -n 3 $1 > $2/${f}.tail
```
`dvc run -d data/file.txt -o dir1 ./headdir.sh data/file.txt dir1`
--- END ISSUE ---
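The check behind this behaviour is `Output.changed` in `dvc/output.py` (shown below): a cached output counts as unchanged only when its path and its cache entry pass a `samefile` test, which holds for the hardlinked single-file case but never for a directory output, whose cache is a separate directory of per-file info files. A simplified sketch of that check, not the project's actual code:
```python
import os
import stat

def looks_unchanged(path, cache):
    # For a file output, path and cache are hardlinks to the same inode,
    # so samefile() is True and the stage is skipped. For a directory
    # output, cache is a different directory, samefile() is always False,
    # and the stage is re-run every time.
    return (
        os.path.exists(path)
        and os.path.exists(cache)
        and os.path.samefile(path, cache)
        and bool(os.stat(cache).st_mode & stat.S_IREAD)
    )
```
A directory-aware comparison therefore has to walk the files and compare each one against its own cache entry, which is what the accompanying patch does.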
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/output.py`
Content:
```
1 import os
2 import stat
3 import yaml
4 from checksumdir import dirhash
5
6 from dvc.system import System
7 from dvc.utils import file_md5
8 from dvc.exceptions import DvcException
9
10
11 class OutputError(DvcException):
12 pass
13
14
15 class CmdOutputError(DvcException):
16 def __init__(self, path, msg):
17 super(CmdOutputError, self).__init__('Output file \'{}\' error: {}'.format(path, msg))
18
19
20 class CmdOutputNoCacheError(CmdOutputError):
21 def __init__(self, path):
22 super(CmdOutputNoCacheError, self).__init__(path, 'no cache')
23
24
25 class CmdOutputOutsideOfRepoError(CmdOutputError):
26 def __init__(self, path):
27 super(CmdOutputOutsideOfRepoError, self).__init__(path, 'outside of repository')
28
29
30 class CmdOutputDoesNotExistError(CmdOutputError):
31 def __init__(self, path):
32 super(CmdOutputDoesNotExistError, self).__init__(path, 'does not exist')
33
34
35 class CmdOutputIsNotFileOrDirError(CmdOutputError):
36 def __init__(self, path):
37 super(CmdOutputIsNotFileOrDirError, self).__init__(path, 'not a file or directory')
38
39
40 class CmdOutputAlreadyTrackedError(CmdOutputError):
41 def __init__(self, path):
42 super(CmdOutputAlreadyTrackedError, self).__init__(path, 'already tracked by scm(e.g. git)')
43
44
45 class Dependency(object):
46 PARAM_PATH = 'path'
47 PARAM_MD5 = 'md5'
48
49 def __init__(self, project, path, md5=None):
50 self.project = project
51 self.path = os.path.abspath(os.path.realpath(path))
52
53 if not self.path.startswith(self.project.root_dir):
54 raise CmdOutputOutsideOfRepoError(self.rel_path)
55
56 self.md5 = md5
57
58 @property
59 def dvc_path(self):
60 return os.path.relpath(self.path, self.project.root_dir)
61
62 @property
63 def rel_path(self):
64 return os.path.relpath(self.path, '.')
65
66 def _changed_md5(self):
67 if not os.path.exists(self.path):
68 return True
69
70 state = self.project.state.get(self.path)
71 if state and state.mtime == self.mtime() and state.inode == self.inode():
72 md5 = state.md5
73 else:
74 md5 = self.compute_md5()
75
76 return self.md5 != md5
77
78 def changed(self):
79 return self._changed_md5()
80
81 def compute_md5(self):
82 if os.path.isdir(self.path):
83 return dirhash(self.path, hashfunc='md5')
84 else:
85 return file_md5(self.path)[0]
86
87 def mtime(self):
88 return os.path.getmtime(self.path)
89
90 def inode(self):
91 return os.stat(self.path).st_ino
92
93 def save(self):
94 if not os.path.exists(self.path):
95 raise CmdOutputDoesNotExistError(self.rel_path)
96
97 if not os.path.isfile(self.path) and not os.path.isdir(self.path):
98 raise CmdOutputIsNotFileOrDirError(self.rel_path)
99
100 state = self.project.state.get(self.path)
101 if state and state.mtime == self.mtime() and state.inode == self.inode():
102 md5 = state.md5
103 msg = '{} using md5 {} from state file'
104 self.project.logger.debug(msg.format(self.path, md5))
105 self.md5 = md5
106 else:
107 self.md5 = self.compute_md5()
108 self.project.state.update(self.path, self.md5, self.mtime(), self.inode())
109
110 def dumpd(self, cwd):
111 return {
112 Output.PARAM_PATH: os.path.relpath(self.path, cwd),
113 Output.PARAM_MD5: self.md5,
114 }
115
116 @classmethod
117 def loadd(cls, project, d, cwd=os.curdir):
118 path = os.path.join(cwd, d[Output.PARAM_PATH])
119 md5 = d.get(Output.PARAM_MD5, None)
120 return cls(project, path, md5=md5)
121
122 @classmethod
123 def loadd_from(cls, project, d_list, cwd=os.curdir):
124 return [cls.loadd(project, x, cwd=cwd) for x in d_list]
125
126 @classmethod
127 def loads(cls, project, s, cwd=os.curdir):
128 return cls(project, os.path.join(cwd, s), md5=None)
129
130 @classmethod
131 def loads_from(cls, project, s_list, cwd=os.curdir):
132 return [cls.loads(project, x, cwd=cwd) for x in s_list]
133
134 def stage(self):
135 for stage in self.project.stages():
136 for out in stage.outs:
137 if self.path == out.path:
138 return stage
139 return None
140
141
142 class Output(Dependency):
143 PARAM_CACHE = 'cache'
144
145 def __init__(self, project, path, md5=None, use_cache=True):
146 super(Output, self).__init__(project, path, md5=md5)
147 self.use_cache = use_cache
148
149 @property
150 def cache(self):
151 return self.project.cache.get(self.md5)
152
153 def dumpd(self, cwd):
154 ret = super(Output, self).dumpd(cwd)
155 ret[Output.PARAM_CACHE] = self.use_cache
156 return ret
157
158 @classmethod
159 def loadd(cls, project, d, cwd=os.curdir):
160 ret = super(Output, cls).loadd(project, d, cwd=cwd)
161 ret.use_cache = d.get(Output.PARAM_CACHE, True)
162 return ret
163
164 @classmethod
165 def loads(cls, project, s, use_cache=True, cwd=os.curdir):
166 ret = super(Output, cls).loads(project, s, cwd=cwd)
167 ret.use_cache = use_cache
168 return ret
169
170 @classmethod
171 def loads_from(cls, project, s_list, use_cache=False, cwd=os.curdir):
172 return [cls.loads(project, x, use_cache=use_cache, cwd=cwd) for x in s_list]
173
174 def changed(self):
175 ret = True
176
177 if not self.use_cache:
178 ret = super(Output, self).changed()
179 elif os.path.exists(self.path) and \
180 os.path.exists(self.cache) and \
181 System.samefile(self.path, self.cache) and \
182 os.stat(self.cache).st_mode & stat.S_IREAD:
183 ret = False
184
185 msg = "Data {} with cache {} "
186 if ret:
187 msg += "changed"
188 else:
189 msg += "didn't change"
190 self.project.logger.debug(msg.format(self.path, self.cache))
191
192 return ret
193
194 def hardlink(self, src, link):
195 self.project.logger.debug("creating hardlink {} -> {}".format(src, link))
196 System.hardlink(src, link)
197 os.chmod(src, stat.S_IREAD)
198
199 def dir_cache(self):
200 res = {}
201 for root, dirs, files in os.walk(self.cache):
202 for fname in files:
203 path = os.path.join(root, fname)
204 relpath = os.path.relpath(path, self.cache)
205 with open(path, 'r') as fd:
206 d = yaml.safe_load(fd)
207 md5 = d[Output.PARAM_MD5]
208 res[relpath] = self.project.cache.get(md5)
209 return res
210
211 def checkout(self):
212 if not self.use_cache:
213 return
214
215 self.project.logger.debug("Checking out {} with cache {}".format(self.path, self.cache))
216
217 if not self.changed():
218 msg = "Data {} with cache {} didn't change, skipping checkout."
219 self.project.logger.debug(msg.format(self.path, self.cache))
220 return
221
222 if not os.path.exists(self.cache):
223 self.project.logger.warn(u'\'{}\': cache file not found'.format(self.dvc_path))
224 self.remove()
225 return
226
227 if os.path.exists(self.path):
228 msg = "Data {} exists. Removing before checkout"
229 self.project.logger.debug(msg.format(self.path))
230 self.remove()
231
232 if os.path.isfile(self.cache):
233 self.hardlink(self.cache, self.path)
234 return
235
236 for relpath, cache in self.dir_cache().items():
237 path = os.path.join(self.path, relpath)
238 dname = os.path.dirname(path)
239
240 if not os.path.exists(dname):
241 os.makedirs(dname)
242
243 self.hardlink(cache, path)
244
245 def save(self):
246 super(Output, self).save()
247
248 if not self.use_cache:
249 return
250
251 self.project.logger.debug("Saving {} to {}".format(self.path, self.cache))
252
253 if self.project.scm.is_tracked(self.path):
254 raise CmdOutputAlreadyTrackedError(self.rel_path)
255
256 if not self.changed():
257 return
258
259 if os.path.exists(self.cache):
260 # This means that we already have cache for this data.
261 # We remove data and link it to existing cache to save
262 # some space.
263 msg = "Cache {} already exists, performing checkout for {}"
264 self.project.logger.debug(msg.format(self.cache, self.path))
265 self.checkout()
266 return
267
268 if os.path.isfile(self.path):
269 self.hardlink(self.path, self.cache)
270 return
271
272 for root, dirs, files in os.walk(self.path):
273 for fname in files:
274 path = os.path.join(root, fname)
275 relpath = os.path.relpath(path, self.path)
276 md5 = file_md5(path)[0]
277 cache = self.project.cache.get(md5)
278 cache_info = os.path.join(self.cache, relpath)
279 cache_dir = os.path.dirname(cache_info)
280
281 if os.path.exists(cache):
282 self._remove(path, None)
283 self.hardlink(cache, path)
284 else:
285 self.hardlink(path, cache)
286
287 if not os.path.exists(cache_dir):
288 os.makedirs(cache_dir)
289
290 with open(cache_info, 'w') as fd:
291 yaml.safe_dump({self.PARAM_MD5: md5}, fd, default_flow_style=False)
292
293 def _remove(self, path, cache):
294 self.project.logger.debug("Removing '{}'".format(path))
295 os.chmod(path, stat.S_IWUSR)
296 os.unlink(path)
297 if cache != None and os.path.exists(cache):
298 os.chmod(cache, stat.S_IREAD)
299
300 def remove(self):
301 if not os.path.exists(self.path):
302 return
303
304 if os.path.isfile(self.path):
305 self._remove(self.path, self.cache)
306 return
307
308 caches = self.dir_cache()
309 for root, dirs, files in os.walk(self.path, topdown=False):
310 for d in dirs:
311 path = os.path.join(root, d)
312 os.rmdir(path)
313 for f in files:
314 path = os.path.join(root, f)
315 relpath = os.path.relpath(path, self.path)
316 cache = caches.get(relpath, None)
317 self._remove(path, cache)
318 os.rmdir(self.path)
319
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dvc/output.py b/dvc/output.py
--- a/dvc/output.py
+++ b/dvc/output.py
@@ -171,16 +171,48 @@
def loads_from(cls, project, s_list, use_cache=False, cwd=os.curdir):
return [cls.loads(project, x, use_cache=use_cache, cwd=cwd) for x in s_list]
+ def _changed_file(self, path, cache):
+ if os.path.isfile(path) and \
+ os.path.isfile(cache) and \
+ System.samefile(path, cache) and \
+ os.stat(cache).st_mode & stat.S_IREAD:
+ return False
+
+ return True
+
+ def _changed_dir(self):
+ if not os.path.isdir(self.path) or not os.path.isdir(self.cache):
+ return True
+
+ for root, dirs, files in os.walk(self.path):
+ for fname in files:
+ path = os.path.join(root, fname)
+ mtime = os.path.getmtime(path)
+ inode = os.stat(path).st_ino
+
+ state = self.project.state.get(path)
+ if state and state.mtime == mtime and state.inode == inode:
+ md5 = state.md5
+ else:
+ md5 = file_md5(path)[0]
+
+ cache = self.project.cache.get(md5)
+ if self._changed_file(path, cache):
+ return True
+
+ return False
+
def changed(self):
ret = True
if not self.use_cache:
ret = super(Output, self).changed()
- elif os.path.exists(self.path) and \
- os.path.exists(self.cache) and \
- System.samefile(self.path, self.cache) and \
- os.stat(self.cache).st_mode & stat.S_IREAD:
- ret = False
+ elif os.path.isfile(self.path) and \
+ os.path.isfile(self.cache):
+ ret = self._changed_file(self.path, self.cache)
+ elif os.path.isdir(self.path) and \
+ os.path.isdir(self.cache):
+ ret = self._changed_dir()
msg = "Data {} with cache {} "
if ret:
| {"golden_diff": "diff --git a/dvc/output.py b/dvc/output.py\n--- a/dvc/output.py\n+++ b/dvc/output.py\n@@ -171,16 +171,48 @@\n def loads_from(cls, project, s_list, use_cache=False, cwd=os.curdir):\n return [cls.loads(project, x, use_cache=use_cache, cwd=cwd) for x in s_list]\n \n+ def _changed_file(self, path, cache):\n+ if os.path.isfile(path) and \\\n+ os.path.isfile(cache) and \\\n+ System.samefile(path, cache) and \\\n+ os.stat(cache).st_mode & stat.S_IREAD:\n+ return False\n+\n+ return True\n+\n+ def _changed_dir(self):\n+ if not os.path.isdir(self.path) or not os.path.isdir(self.cache):\n+ return True\n+\n+ for root, dirs, files in os.walk(self.path):\n+ for fname in files:\n+ path = os.path.join(root, fname)\n+ mtime = os.path.getmtime(path)\n+ inode = os.stat(path).st_ino\n+\n+ state = self.project.state.get(path)\n+ if state and state.mtime == mtime and state.inode == inode:\n+ md5 = state.md5\n+ else:\n+ md5 = file_md5(path)[0]\n+\n+ cache = self.project.cache.get(md5)\n+ if self._changed_file(path, cache):\n+ return True\n+\n+ return False\n+\n def changed(self):\n ret = True\n \n if not self.use_cache:\n ret = super(Output, self).changed()\n- elif os.path.exists(self.path) and \\\n- os.path.exists(self.cache) and \\\n- System.samefile(self.path, self.cache) and \\\n- os.stat(self.cache).st_mode & stat.S_IREAD:\n- ret = False\n+ elif os.path.isfile(self.path) and \\\n+ os.path.isfile(self.cache):\n+ ret = self._changed_file(self.path, self.cache)\n+ elif os.path.isdir(self.path) and \\\n+ os.path.isdir(self.cache):\n+ ret = self._changed_dir()\n \n msg = \"Data {} with cache {} \"\n if ret:\n", "issue": "dvc repro reproduces directories all the time\nThe directory was just created by `dvc run` and next `dvc repro` reproduces the directory one more time.\r\n\r\n```\r\n$ dvc repro -v dir1.dvc\r\nData /home/ubuntu/src/myrepo_test/data/file.txt with cache /home/ubuntu/src/myrepo_test/.dvc/cache/2db7e560e41eb16f6a78cb13d2645836 didn't change\r\n/home/ubuntu/src/myrepo_test/data/file.txt.dvc didn't change\r\nData /home/ubuntu/src/myrepo_test/dir1 with cache /home/ubuntu/src/myrepo_test/.dvc/cache/3908923f1bc06b1f1279aad916f5cdbf changed\r\n/home/ubuntu/src/myrepo_test/dir1.dvc changed\r\nData /home/ubuntu/src/myrepo_test/dir1 with cache /home/ubuntu/src/myrepo_test/.dvc/cache/3908923f1bc06b1f1279aad916f5cdbf changed\r\n/home/ubuntu/src/myrepo_test/dir1.dvc changed\r\nRemoving '/home/ubuntu/src/myrepo_test/dir1/.head'\r\nRemoving '/home/ubuntu/src/myrepo_test/dir1/.tail'\r\nReproducing 'dir1.dvc':\r\n\t./headdir.sh data/file.txt dir1\r\n...\r\n```\r\n\r\nDetails:\r\n```\r\n# headdir.sh\r\nf=`basename $2`\r\nhead -n 3 $1 > $2/${f}.head\r\ntail -n 3 $1 > $2/${f}.tail\r\n```\r\n\r\n`dvc run -d data/file.txt -o dir1 ./headdir.sh data/file.txt dir1`\n", "before_files": [{"content": "import os\nimport stat\nimport yaml\nfrom checksumdir import dirhash\n\nfrom dvc.system import System\nfrom dvc.utils import file_md5\nfrom dvc.exceptions import DvcException\n\n\nclass OutputError(DvcException):\n pass\n\n\nclass CmdOutputError(DvcException):\n def __init__(self, path, msg):\n super(CmdOutputError, self).__init__('Output file \\'{}\\' error: {}'.format(path, msg))\n\n\nclass CmdOutputNoCacheError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputNoCacheError, self).__init__(path, 'no cache')\n\n\nclass CmdOutputOutsideOfRepoError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputOutsideOfRepoError, self).__init__(path, 'outside of repository')\n\n\nclass 
CmdOutputDoesNotExistError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputDoesNotExistError, self).__init__(path, 'does not exist')\n\n\nclass CmdOutputIsNotFileOrDirError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputIsNotFileOrDirError, self).__init__(path, 'not a file or directory')\n\n\nclass CmdOutputAlreadyTrackedError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputAlreadyTrackedError, self).__init__(path, 'already tracked by scm(e.g. git)')\n\n\nclass Dependency(object):\n PARAM_PATH = 'path'\n PARAM_MD5 = 'md5'\n\n def __init__(self, project, path, md5=None):\n self.project = project\n self.path = os.path.abspath(os.path.realpath(path))\n\n if not self.path.startswith(self.project.root_dir):\n raise CmdOutputOutsideOfRepoError(self.rel_path)\n\n self.md5 = md5\n\n @property\n def dvc_path(self):\n return os.path.relpath(self.path, self.project.root_dir)\n\n @property\n def rel_path(self):\n return os.path.relpath(self.path, '.')\n\n def _changed_md5(self):\n if not os.path.exists(self.path):\n return True\n\n state = self.project.state.get(self.path)\n if state and state.mtime == self.mtime() and state.inode == self.inode():\n md5 = state.md5\n else:\n md5 = self.compute_md5()\n\n return self.md5 != md5\n\n def changed(self):\n return self._changed_md5()\n\n def compute_md5(self):\n if os.path.isdir(self.path):\n return dirhash(self.path, hashfunc='md5')\n else:\n return file_md5(self.path)[0]\n\n def mtime(self):\n return os.path.getmtime(self.path)\n\n def inode(self):\n return os.stat(self.path).st_ino\n\n def save(self):\n if not os.path.exists(self.path):\n raise CmdOutputDoesNotExistError(self.rel_path)\n\n if not os.path.isfile(self.path) and not os.path.isdir(self.path):\n raise CmdOutputIsNotFileOrDirError(self.rel_path)\n\n state = self.project.state.get(self.path)\n if state and state.mtime == self.mtime() and state.inode == self.inode():\n md5 = state.md5\n msg = '{} using md5 {} from state file'\n self.project.logger.debug(msg.format(self.path, md5))\n self.md5 = md5\n else:\n self.md5 = self.compute_md5()\n self.project.state.update(self.path, self.md5, self.mtime(), self.inode())\n\n def dumpd(self, cwd):\n return {\n Output.PARAM_PATH: os.path.relpath(self.path, cwd),\n Output.PARAM_MD5: self.md5,\n }\n\n @classmethod\n def loadd(cls, project, d, cwd=os.curdir):\n path = os.path.join(cwd, d[Output.PARAM_PATH])\n md5 = d.get(Output.PARAM_MD5, None)\n return cls(project, path, md5=md5)\n\n @classmethod\n def loadd_from(cls, project, d_list, cwd=os.curdir):\n return [cls.loadd(project, x, cwd=cwd) for x in d_list]\n\n @classmethod\n def loads(cls, project, s, cwd=os.curdir):\n return cls(project, os.path.join(cwd, s), md5=None)\n\n @classmethod\n def loads_from(cls, project, s_list, cwd=os.curdir):\n return [cls.loads(project, x, cwd=cwd) for x in s_list]\n\n def stage(self):\n for stage in self.project.stages():\n for out in stage.outs:\n if self.path == out.path:\n return stage\n return None\n\n\nclass Output(Dependency):\n PARAM_CACHE = 'cache'\n\n def __init__(self, project, path, md5=None, use_cache=True):\n super(Output, self).__init__(project, path, md5=md5)\n self.use_cache = use_cache\n\n @property\n def cache(self):\n return self.project.cache.get(self.md5)\n\n def dumpd(self, cwd):\n ret = super(Output, self).dumpd(cwd)\n ret[Output.PARAM_CACHE] = self.use_cache\n return ret\n\n @classmethod\n def loadd(cls, project, d, cwd=os.curdir):\n ret = super(Output, cls).loadd(project, d, cwd=cwd)\n ret.use_cache = 
d.get(Output.PARAM_CACHE, True)\n return ret\n\n @classmethod\n def loads(cls, project, s, use_cache=True, cwd=os.curdir):\n ret = super(Output, cls).loads(project, s, cwd=cwd)\n ret.use_cache = use_cache\n return ret\n\n @classmethod\n def loads_from(cls, project, s_list, use_cache=False, cwd=os.curdir):\n return [cls.loads(project, x, use_cache=use_cache, cwd=cwd) for x in s_list]\n\n def changed(self):\n ret = True\n\n if not self.use_cache:\n ret = super(Output, self).changed()\n elif os.path.exists(self.path) and \\\n os.path.exists(self.cache) and \\\n System.samefile(self.path, self.cache) and \\\n os.stat(self.cache).st_mode & stat.S_IREAD:\n ret = False\n\n msg = \"Data {} with cache {} \"\n if ret:\n msg += \"changed\"\n else:\n msg += \"didn't change\"\n self.project.logger.debug(msg.format(self.path, self.cache))\n\n return ret\n\n def hardlink(self, src, link):\n self.project.logger.debug(\"creating hardlink {} -> {}\".format(src, link))\n System.hardlink(src, link)\n os.chmod(src, stat.S_IREAD)\n\n def dir_cache(self):\n res = {}\n for root, dirs, files in os.walk(self.cache):\n for fname in files:\n path = os.path.join(root, fname)\n relpath = os.path.relpath(path, self.cache)\n with open(path, 'r') as fd:\n d = yaml.safe_load(fd)\n md5 = d[Output.PARAM_MD5]\n res[relpath] = self.project.cache.get(md5)\n return res\n\n def checkout(self):\n if not self.use_cache:\n return\n\n self.project.logger.debug(\"Checking out {} with cache {}\".format(self.path, self.cache))\n\n if not self.changed():\n msg = \"Data {} with cache {} didn't change, skipping checkout.\"\n self.project.logger.debug(msg.format(self.path, self.cache))\n return\n\n if not os.path.exists(self.cache):\n self.project.logger.warn(u'\\'{}\\': cache file not found'.format(self.dvc_path))\n self.remove()\n return\n\n if os.path.exists(self.path):\n msg = \"Data {} exists. 
Removing before checkout\"\n self.project.logger.debug(msg.format(self.path))\n self.remove()\n\n if os.path.isfile(self.cache):\n self.hardlink(self.cache, self.path)\n return\n\n for relpath, cache in self.dir_cache().items():\n path = os.path.join(self.path, relpath)\n dname = os.path.dirname(path)\n\n if not os.path.exists(dname):\n os.makedirs(dname)\n\n self.hardlink(cache, path)\n\n def save(self):\n super(Output, self).save()\n\n if not self.use_cache:\n return\n\n self.project.logger.debug(\"Saving {} to {}\".format(self.path, self.cache))\n\n if self.project.scm.is_tracked(self.path):\n raise CmdOutputAlreadyTrackedError(self.rel_path)\n\n if not self.changed():\n return\n\n if os.path.exists(self.cache):\n # This means that we already have cache for this data.\n # We remove data and link it to existing cache to save\n # some space.\n msg = \"Cache {} already exists, performing checkout for {}\"\n self.project.logger.debug(msg.format(self.cache, self.path))\n self.checkout()\n return\n\n if os.path.isfile(self.path):\n self.hardlink(self.path, self.cache)\n return\n\n for root, dirs, files in os.walk(self.path):\n for fname in files:\n path = os.path.join(root, fname)\n relpath = os.path.relpath(path, self.path)\n md5 = file_md5(path)[0]\n cache = self.project.cache.get(md5)\n cache_info = os.path.join(self.cache, relpath)\n cache_dir = os.path.dirname(cache_info)\n\n if os.path.exists(cache):\n self._remove(path, None)\n self.hardlink(cache, path)\n else:\n self.hardlink(path, cache)\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n with open(cache_info, 'w') as fd:\n yaml.safe_dump({self.PARAM_MD5: md5}, fd, default_flow_style=False)\n\n def _remove(self, path, cache):\n self.project.logger.debug(\"Removing '{}'\".format(path))\n os.chmod(path, stat.S_IWUSR)\n os.unlink(path)\n if cache != None and os.path.exists(cache):\n os.chmod(cache, stat.S_IREAD)\n\n def remove(self):\n if not os.path.exists(self.path):\n return\n\n if os.path.isfile(self.path):\n self._remove(self.path, self.cache)\n return\n\n caches = self.dir_cache()\n for root, dirs, files in os.walk(self.path, topdown=False):\n for d in dirs:\n path = os.path.join(root, d)\n os.rmdir(path)\n for f in files:\n path = os.path.join(root, f)\n relpath = os.path.relpath(path, self.path)\n cache = caches.get(relpath, None)\n self._remove(path, cache)\n os.rmdir(self.path)\n", "path": "dvc/output.py"}], "after_files": [{"content": "import os\nimport stat\nimport yaml\nfrom checksumdir import dirhash\n\nfrom dvc.system import System\nfrom dvc.utils import file_md5\nfrom dvc.exceptions import DvcException\n\n\nclass OutputError(DvcException):\n pass\n\n\nclass CmdOutputError(DvcException):\n def __init__(self, path, msg):\n super(CmdOutputError, self).__init__('Output file \\'{}\\' error: {}'.format(path, msg))\n\n\nclass CmdOutputNoCacheError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputNoCacheError, self).__init__(path, 'no cache')\n\n\nclass CmdOutputOutsideOfRepoError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputOutsideOfRepoError, self).__init__(path, 'outside of repository')\n\n\nclass CmdOutputDoesNotExistError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputDoesNotExistError, self).__init__(path, 'does not exist')\n\n\nclass CmdOutputIsNotFileOrDirError(CmdOutputError):\n def __init__(self, path):\n super(CmdOutputIsNotFileOrDirError, self).__init__(path, 'not a file or directory')\n\n\nclass CmdOutputAlreadyTrackedError(CmdOutputError):\n def 
__init__(self, path):\n super(CmdOutputAlreadyTrackedError, self).__init__(path, 'already tracked by scm(e.g. git)')\n\n\nclass Dependency(object):\n PARAM_PATH = 'path'\n PARAM_MD5 = 'md5'\n\n def __init__(self, project, path, md5=None):\n self.project = project\n self.path = os.path.abspath(os.path.realpath(path))\n\n if not self.path.startswith(self.project.root_dir):\n raise CmdOutputOutsideOfRepoError(self.rel_path)\n\n self.md5 = md5\n\n @property\n def dvc_path(self):\n return os.path.relpath(self.path, self.project.root_dir)\n\n @property\n def rel_path(self):\n return os.path.relpath(self.path, '.')\n\n def _changed_md5(self):\n if not os.path.exists(self.path):\n return True\n\n state = self.project.state.get(self.path)\n if state and state.mtime == self.mtime() and state.inode == self.inode():\n md5 = state.md5\n else:\n md5 = self.compute_md5()\n\n return self.md5 != md5\n\n def changed(self):\n return self._changed_md5()\n\n def compute_md5(self):\n if os.path.isdir(self.path):\n return dirhash(self.path, hashfunc='md5')\n else:\n return file_md5(self.path)[0]\n\n def mtime(self):\n return os.path.getmtime(self.path)\n\n def inode(self):\n return os.stat(self.path).st_ino\n\n def save(self):\n if not os.path.exists(self.path):\n raise CmdOutputDoesNotExistError(self.rel_path)\n\n if not os.path.isfile(self.path) and not os.path.isdir(self.path):\n raise CmdOutputIsNotFileOrDirError(self.rel_path)\n\n state = self.project.state.get(self.path)\n if state and state.mtime == self.mtime() and state.inode == self.inode():\n md5 = state.md5\n msg = '{} using md5 {} from state file'\n self.project.logger.debug(msg.format(self.path, md5))\n self.md5 = md5\n else:\n self.md5 = self.compute_md5()\n self.project.state.update(self.path, self.md5, self.mtime(), self.inode())\n\n def dumpd(self, cwd):\n return {\n Output.PARAM_PATH: os.path.relpath(self.path, cwd),\n Output.PARAM_MD5: self.md5,\n }\n\n @classmethod\n def loadd(cls, project, d, cwd=os.curdir):\n path = os.path.join(cwd, d[Output.PARAM_PATH])\n md5 = d.get(Output.PARAM_MD5, None)\n return cls(project, path, md5=md5)\n\n @classmethod\n def loadd_from(cls, project, d_list, cwd=os.curdir):\n return [cls.loadd(project, x, cwd=cwd) for x in d_list]\n\n @classmethod\n def loads(cls, project, s, cwd=os.curdir):\n return cls(project, os.path.join(cwd, s), md5=None)\n\n @classmethod\n def loads_from(cls, project, s_list, cwd=os.curdir):\n return [cls.loads(project, x, cwd=cwd) for x in s_list]\n\n def stage(self):\n for stage in self.project.stages():\n for out in stage.outs:\n if self.path == out.path:\n return stage\n return None\n\n\nclass Output(Dependency):\n PARAM_CACHE = 'cache'\n\n def __init__(self, project, path, md5=None, use_cache=True):\n super(Output, self).__init__(project, path, md5=md5)\n self.use_cache = use_cache\n\n @property\n def cache(self):\n return self.project.cache.get(self.md5)\n\n def dumpd(self, cwd):\n ret = super(Output, self).dumpd(cwd)\n ret[Output.PARAM_CACHE] = self.use_cache\n return ret\n\n @classmethod\n def loadd(cls, project, d, cwd=os.curdir):\n ret = super(Output, cls).loadd(project, d, cwd=cwd)\n ret.use_cache = d.get(Output.PARAM_CACHE, True)\n return ret\n\n @classmethod\n def loads(cls, project, s, use_cache=True, cwd=os.curdir):\n ret = super(Output, cls).loads(project, s, cwd=cwd)\n ret.use_cache = use_cache\n return ret\n\n @classmethod\n def loads_from(cls, project, s_list, use_cache=False, cwd=os.curdir):\n return [cls.loads(project, x, use_cache=use_cache, cwd=cwd) for x in 
s_list]\n\n def _changed_file(self, path, cache):\n if os.path.isfile(path) and \\\n os.path.isfile(cache) and \\\n System.samefile(path, cache) and \\\n os.stat(cache).st_mode & stat.S_IREAD:\n return False\n\n return True\n\n def _changed_dir(self):\n if not os.path.isdir(self.path) or not os.path.isdir(self.cache):\n return True\n\n for root, dirs, files in os.walk(self.path):\n for fname in files:\n path = os.path.join(root, fname)\n mtime = os.path.getmtime(path)\n inode = os.stat(path).st_ino\n\n state = self.project.state.get(path)\n if state and state.mtime == mtime and state.inode == inode:\n md5 = state.md5\n else:\n md5 = file_md5(path)[0]\n\n cache = self.project.cache.get(md5)\n if self._changed_file(path, cache):\n return True\n\n return False\n\n def changed(self):\n ret = True\n\n if not self.use_cache:\n ret = super(Output, self).changed()\n elif os.path.isfile(self.path) and \\\n os.path.isfile(self.cache):\n ret = self._changed_file(self.path, self.cache)\n elif os.path.isdir(self.path) and \\\n os.path.isdir(self.cache):\n ret = self._changed_dir()\n\n msg = \"Data {} with cache {} \"\n if ret:\n msg += \"changed\"\n else:\n msg += \"didn't change\"\n self.project.logger.debug(msg.format(self.path, self.cache))\n\n return ret\n\n def hardlink(self, src, link):\n self.project.logger.debug(\"creating hardlink {} -> {}\".format(src, link))\n System.hardlink(src, link)\n os.chmod(src, stat.S_IREAD)\n\n def dir_cache(self):\n res = {}\n for root, dirs, files in os.walk(self.cache):\n for fname in files:\n path = os.path.join(root, fname)\n relpath = os.path.relpath(path, self.cache)\n with open(path, 'r') as fd:\n d = yaml.safe_load(fd)\n md5 = d[Output.PARAM_MD5]\n res[relpath] = self.project.cache.get(md5)\n return res\n\n def checkout(self):\n if not self.use_cache:\n return\n\n self.project.logger.debug(\"Checking out {} with cache {}\".format(self.path, self.cache))\n\n if not self.changed():\n msg = \"Data {} with cache {} didn't change, skipping checkout.\"\n self.project.logger.debug(msg.format(self.path, self.cache))\n return\n\n if not os.path.exists(self.cache):\n self.project.logger.warn(u'\\'{}\\': cache file not found'.format(self.dvc_path))\n self.remove()\n return\n\n if os.path.exists(self.path):\n msg = \"Data {} exists. 
Removing before checkout\"\n self.project.logger.debug(msg.format(self.path))\n self.remove()\n\n if os.path.isfile(self.cache):\n self.hardlink(self.cache, self.path)\n return\n\n for relpath, cache in self.dir_cache().items():\n path = os.path.join(self.path, relpath)\n dname = os.path.dirname(path)\n\n if not os.path.exists(dname):\n os.makedirs(dname)\n\n self.hardlink(cache, path)\n\n def save(self):\n super(Output, self).save()\n\n if not self.use_cache:\n return\n\n self.project.logger.debug(\"Saving {} to {}\".format(self.path, self.cache))\n\n if self.project.scm.is_tracked(self.path):\n raise CmdOutputAlreadyTrackedError(self.rel_path)\n\n if not self.changed():\n return\n\n if os.path.exists(self.cache):\n # This means that we already have cache for this data.\n # We remove data and link it to existing cache to save\n # some space.\n msg = \"Cache {} already exists, performing checkout for {}\"\n self.project.logger.debug(msg.format(self.cache, self.path))\n self.checkout()\n return\n\n if os.path.isfile(self.path):\n self.hardlink(self.path, self.cache)\n return\n\n for root, dirs, files in os.walk(self.path):\n for fname in files:\n path = os.path.join(root, fname)\n relpath = os.path.relpath(path, self.path)\n md5 = file_md5(path)[0]\n cache = self.project.cache.get(md5)\n cache_info = os.path.join(self.cache, relpath)\n cache_dir = os.path.dirname(cache_info)\n\n if os.path.exists(cache):\n self._remove(path, None)\n self.hardlink(cache, path)\n else:\n self.hardlink(path, cache)\n\n if not os.path.exists(cache_dir):\n os.makedirs(cache_dir)\n\n with open(cache_info, 'w') as fd:\n yaml.safe_dump({self.PARAM_MD5: md5}, fd, default_flow_style=False)\n\n def _remove(self, path, cache):\n self.project.logger.debug(\"Removing '{}'\".format(path))\n os.chmod(path, stat.S_IWUSR)\n os.unlink(path)\n if cache != None and os.path.exists(cache):\n os.chmod(cache, stat.S_IREAD)\n\n def remove(self):\n if not os.path.exists(self.path):\n return\n\n if os.path.isfile(self.path):\n self._remove(self.path, self.cache)\n return\n\n caches = self.dir_cache()\n for root, dirs, files in os.walk(self.path, topdown=False):\n for d in dirs:\n path = os.path.join(root, d)\n os.rmdir(path)\n for f in files:\n path = os.path.join(root, f)\n relpath = os.path.relpath(path, self.path)\n cache = caches.get(relpath, None)\n self._remove(path, cache)\n os.rmdir(self.path)\n", "path": "dvc/output.py"}]} | 3,873 | 494 |
gh_patches_debug_908 | rasdani/github-patches | git_diff | mlflow__mlflow-9827 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DOC-FIX] Doc for Run.inputs erroneously refers to Run.data
### Willingness to contribute
No. I cannot contribute a documentation fix at this time.
### URL(s) with the issue
https://www.mlflow.org/docs/latest/python_api/mlflow.entities.html#mlflow.entities.Run
### Description of proposal (what needs changing)
In the Run doc page, the doc for Run.inputs refers to Run.data instead of Run.inputs.
property inputs
The run inputs, including dataset inputs
Return type
mlflow.entities.RunData
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/entities/run.py`
Content:
```
1 from typing import Any, Dict, Optional
2
3 from mlflow.entities._mlflow_object import _MLflowObject
4 from mlflow.entities.run_data import RunData
5 from mlflow.entities.run_info import RunInfo
6 from mlflow.entities.run_inputs import RunInputs
7 from mlflow.exceptions import MlflowException
8 from mlflow.protos.service_pb2 import Run as ProtoRun
9
10
11 class Run(_MLflowObject):
12 """
13 Run object.
14 """
15
16 def __init__(
17 self, run_info: RunInfo, run_data: RunData, run_inputs: Optional[RunInputs] = None
18 ) -> None:
19 if run_info is None:
20 raise MlflowException("run_info cannot be None")
21 self._info = run_info
22 self._data = run_data
23 self._inputs = run_inputs
24
25 @property
26 def info(self) -> RunInfo:
27 """
28 The run metadata, such as the run id, start time, and status.
29
30 :rtype: :py:class:`mlflow.entities.RunInfo`
31 """
32 return self._info
33
34 @property
35 def data(self) -> RunData:
36 """
37 The run data, including metrics, parameters, and tags.
38
39 :rtype: :py:class:`mlflow.entities.RunData`
40 """
41 return self._data
42
43 @property
44 def inputs(self) -> RunInputs:
45 """
46 The run inputs, including dataset inputs
47
48 :rtype: :py:class:`mlflow.entities.RunData`
49 """
50 return self._inputs
51
52 def to_proto(self):
53 run = ProtoRun()
54 run.info.MergeFrom(self.info.to_proto())
55 if self.data:
56 run.data.MergeFrom(self.data.to_proto())
57 if self.inputs:
58 run.inputs.MergeFrom(self.inputs.to_proto())
59 return run
60
61 @classmethod
62 def from_proto(cls, proto):
63 return cls(
64 RunInfo.from_proto(proto.info),
65 RunData.from_proto(proto.data),
66 RunInputs.from_proto(proto.inputs),
67 )
68
69 def to_dictionary(self) -> Dict[Any, Any]:
70 run_dict = {
71 "info": dict(self.info),
72 }
73 if self.data:
74 run_dict["data"] = self.data.to_dictionary()
75 if self.inputs:
76 run_dict["inputs"] = self.inputs.to_dictionary()
77 return run_dict
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlflow/entities/run.py b/mlflow/entities/run.py
--- a/mlflow/entities/run.py
+++ b/mlflow/entities/run.py
@@ -45,7 +45,7 @@
"""
The run inputs, including dataset inputs
- :rtype: :py:class:`mlflow.entities.RunData`
+ :rtype: :py:class:`mlflow.entities.RunInputs`
"""
return self._inputs
| {"golden_diff": "diff --git a/mlflow/entities/run.py b/mlflow/entities/run.py\n--- a/mlflow/entities/run.py\n+++ b/mlflow/entities/run.py\n@@ -45,7 +45,7 @@\n \"\"\"\n The run inputs, including dataset inputs\n \n- :rtype: :py:class:`mlflow.entities.RunData`\n+ :rtype: :py:class:`mlflow.entities.RunInputs`\n \"\"\"\n return self._inputs\n", "issue": "[DOC-FIX] Doc for Run.inputs erroneously refers to Run.data\n### Willingness to contribute\n\nNo. I cannot contribute a documentation fix at this time.\n\n### URL(s) with the issue\n\nhttps://www.mlflow.org/docs/latest/python_api/mlflow.entities.html#mlflow.entities.Run\n\n### Description of proposal (what needs changing)\n\nIn the Run doc page, the doc for Run.inputs refers to Run.data instead of Run.input.\r\n\r\n\r\nproperty inputs\r\nThe run inputs, including dataset inputs\r\n\r\nReturn type\r\nmlflow.entities.RunData\r\n\r\n\n", "before_files": [{"content": "from typing import Any, Dict, Optional\n\nfrom mlflow.entities._mlflow_object import _MLflowObject\nfrom mlflow.entities.run_data import RunData\nfrom mlflow.entities.run_info import RunInfo\nfrom mlflow.entities.run_inputs import RunInputs\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.protos.service_pb2 import Run as ProtoRun\n\n\nclass Run(_MLflowObject):\n \"\"\"\n Run object.\n \"\"\"\n\n def __init__(\n self, run_info: RunInfo, run_data: RunData, run_inputs: Optional[RunInputs] = None\n ) -> None:\n if run_info is None:\n raise MlflowException(\"run_info cannot be None\")\n self._info = run_info\n self._data = run_data\n self._inputs = run_inputs\n\n @property\n def info(self) -> RunInfo:\n \"\"\"\n The run metadata, such as the run id, start time, and status.\n\n :rtype: :py:class:`mlflow.entities.RunInfo`\n \"\"\"\n return self._info\n\n @property\n def data(self) -> RunData:\n \"\"\"\n The run data, including metrics, parameters, and tags.\n\n :rtype: :py:class:`mlflow.entities.RunData`\n \"\"\"\n return self._data\n\n @property\n def inputs(self) -> RunInputs:\n \"\"\"\n The run inputs, including dataset inputs\n\n :rtype: :py:class:`mlflow.entities.RunData`\n \"\"\"\n return self._inputs\n\n def to_proto(self):\n run = ProtoRun()\n run.info.MergeFrom(self.info.to_proto())\n if self.data:\n run.data.MergeFrom(self.data.to_proto())\n if self.inputs:\n run.inputs.MergeFrom(self.inputs.to_proto())\n return run\n\n @classmethod\n def from_proto(cls, proto):\n return cls(\n RunInfo.from_proto(proto.info),\n RunData.from_proto(proto.data),\n RunInputs.from_proto(proto.inputs),\n )\n\n def to_dictionary(self) -> Dict[Any, Any]:\n run_dict = {\n \"info\": dict(self.info),\n }\n if self.data:\n run_dict[\"data\"] = self.data.to_dictionary()\n if self.inputs:\n run_dict[\"inputs\"] = self.inputs.to_dictionary()\n return run_dict\n", "path": "mlflow/entities/run.py"}], "after_files": [{"content": "from typing import Any, Dict, Optional\n\nfrom mlflow.entities._mlflow_object import _MLflowObject\nfrom mlflow.entities.run_data import RunData\nfrom mlflow.entities.run_info import RunInfo\nfrom mlflow.entities.run_inputs import RunInputs\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.protos.service_pb2 import Run as ProtoRun\n\n\nclass Run(_MLflowObject):\n \"\"\"\n Run object.\n \"\"\"\n\n def __init__(\n self, run_info: RunInfo, run_data: RunData, run_inputs: Optional[RunInputs] = None\n ) -> None:\n if run_info is None:\n raise MlflowException(\"run_info cannot be None\")\n self._info = run_info\n self._data = run_data\n self._inputs = run_inputs\n\n 
@property\n def info(self) -> RunInfo:\n \"\"\"\n The run metadata, such as the run id, start time, and status.\n\n :rtype: :py:class:`mlflow.entities.RunInfo`\n \"\"\"\n return self._info\n\n @property\n def data(self) -> RunData:\n \"\"\"\n The run data, including metrics, parameters, and tags.\n\n :rtype: :py:class:`mlflow.entities.RunData`\n \"\"\"\n return self._data\n\n @property\n def inputs(self) -> RunInputs:\n \"\"\"\n The run inputs, including dataset inputs\n\n :rtype: :py:class:`mlflow.entities.RunInputs`\n \"\"\"\n return self._inputs\n\n def to_proto(self):\n run = ProtoRun()\n run.info.MergeFrom(self.info.to_proto())\n if self.data:\n run.data.MergeFrom(self.data.to_proto())\n if self.inputs:\n run.inputs.MergeFrom(self.inputs.to_proto())\n return run\n\n @classmethod\n def from_proto(cls, proto):\n return cls(\n RunInfo.from_proto(proto.info),\n RunData.from_proto(proto.data),\n RunInputs.from_proto(proto.inputs),\n )\n\n def to_dictionary(self) -> Dict[Any, Any]:\n run_dict = {\n \"info\": dict(self.info),\n }\n if self.data:\n run_dict[\"data\"] = self.data.to_dictionary()\n if self.inputs:\n run_dict[\"inputs\"] = self.inputs.to_dictionary()\n return run_dict\n", "path": "mlflow/entities/run.py"}]} | 1,020 | 93 |
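A brief illustrative sketch for the mlflow record above (hypothetical usage, not part of the dataset record; it assumes a reachable tracking server and an existing run id): with the corrected docstring, `Run.inputs` is documented as returning `mlflow.entities.RunInputs`, which matches what callers actually get back from the tracking client.

```python
# Sketch only: "some-run-id" is a placeholder; the attribute names follow
# the entities shown in the record's files.
from mlflow.tracking import MlflowClient
from mlflow.entities import RunInputs

client = MlflowClient()
run = client.get_run("some-run-id")  # hypothetical run id

# run.inputs is a RunInputs object (dataset inputs), not RunData.
assert isinstance(run.inputs, RunInputs)
print(run.inputs.dataset_inputs)
```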
gh_patches_debug_23900 | rasdani/github-patches | git_diff | CTFd__CTFd-1823 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Submissions should link directly to the user that submitted
Submissions don't link directly to the user in team mode which means you need to search to see what user submitted for a given team.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/constants/config.py`
Content:
```
1 import json
2
3 from flask import url_for
4
5 from CTFd.constants import JinjaEnum, RawEnum
6 from CTFd.utils import get_config
7
8
9 class ConfigTypes(str, RawEnum):
10 CHALLENGE_VISIBILITY = "challenge_visibility"
11 SCORE_VISIBILITY = "score_visibility"
12 ACCOUNT_VISIBILITY = "account_visibility"
13 REGISTRATION_VISIBILITY = "registration_visibility"
14
15
16 @JinjaEnum
17 class ChallengeVisibilityTypes(str, RawEnum):
18 PUBLIC = "public"
19 PRIVATE = "private"
20 ADMINS = "admins"
21
22
23 @JinjaEnum
24 class ScoreVisibilityTypes(str, RawEnum):
25 PUBLIC = "public"
26 PRIVATE = "private"
27 HIDDEN = "hidden"
28 ADMINS = "admins"
29
30
31 @JinjaEnum
32 class AccountVisibilityTypes(str, RawEnum):
33 PUBLIC = "public"
34 PRIVATE = "private"
35 ADMINS = "admins"
36
37
38 @JinjaEnum
39 class RegistrationVisibilityTypes(str, RawEnum):
40 PUBLIC = "public"
41 PRIVATE = "private"
42
43
44 class _ConfigsWrapper:
45 def __getattr__(self, attr):
46 return get_config(attr)
47
48 @property
49 def ctf_name(self):
50 return get_config("ctf_name", default="CTFd")
51
52 @property
53 def ctf_small_icon(self):
54 icon = get_config("ctf_small_icon")
55 if icon:
56 return url_for("views.files", path=icon)
57 return url_for("views.themes", path="img/favicon.ico")
58
59 @property
60 def theme_header(self):
61 from CTFd.utils.helpers import markup
62
63 return markup(get_config("theme_header", default=""))
64
65 @property
66 def theme_footer(self):
67 from CTFd.utils.helpers import markup
68
69 return markup(get_config("theme_footer", default=""))
70
71 @property
72 def theme_settings(self):
73 return json.loads(get_config("theme_settings", default="null"))
74
75 @property
76 def tos_or_privacy(self):
77 tos = bool(get_config("tos_url") or get_config("tos_text"))
78 privacy = bool(get_config("privacy_url") or get_config("privacy_text"))
79 return tos or privacy
80
81 @property
82 def tos_link(self):
83 return get_config("tos_url", default=url_for("views.tos"))
84
85 @property
86 def privacy_link(self):
87 return get_config("privacy_url", default=url_for("views.privacy"))
88
89
90 Configs = _ConfigsWrapper()
91
```
Path: `CTFd/utils/modes/__init__.py`
Content:
```
1 from flask import url_for
2
3 from CTFd.models import Teams, Users
4 from CTFd.utils import get_config
5
6 USERS_MODE = "users"
7 TEAMS_MODE = "teams"
8
9
10 def generate_account_url(account_id, admin=False):
11 if get_config("user_mode") == USERS_MODE:
12 if admin:
13 return url_for("admin.users_detail", user_id=account_id)
14 else:
15 return url_for("users.public", user_id=account_id)
16 elif get_config("user_mode") == TEAMS_MODE:
17 if admin:
18 return url_for("admin.teams_detail", team_id=account_id)
19 else:
20 return url_for("teams.public", team_id=account_id)
21
22
23 def get_model():
24 if get_config("user_mode") == USERS_MODE:
25 return Users
26 elif get_config("user_mode") == TEAMS_MODE:
27 return Teams
28
29
30 def get_mode_as_word(plural=False, capitalize=False):
31 if get_config("user_mode") == USERS_MODE:
32 word = "user"
33 else:
34 word = "team"
35
36 if plural:
37 word += "s"
38 if capitalize:
39 word = word.title()
40 return word
41
```
Path: `CTFd/admin/submissions.py`
Content:
```
1 from flask import render_template, request, url_for
2
3 from CTFd.admin import admin
4 from CTFd.models import Challenges, Submissions
5 from CTFd.utils.decorators import admins_only
6 from CTFd.utils.helpers.models import build_model_filters
7 from CTFd.utils.modes import get_model
8
9
10 @admin.route("/admin/submissions", defaults={"submission_type": None})
11 @admin.route("/admin/submissions/<submission_type>")
12 @admins_only
13 def submissions_listing(submission_type):
14 filters_by = {}
15 if submission_type:
16 filters_by["type"] = submission_type
17 filters = []
18
19 q = request.args.get("q")
20 field = request.args.get("field")
21 page = abs(request.args.get("page", 1, type=int))
22
23 filters = build_model_filters(
24 model=Submissions,
25 query=q,
26 field=field,
27 extra_columns={
28 "challenge_name": Challenges.name,
29 "account_id": Submissions.account_id,
30 },
31 )
32
33 Model = get_model()
34
35 submissions = (
36 Submissions.query.add_columns(
37 Submissions.id,
38 Submissions.type,
39 Submissions.challenge_id,
40 Submissions.provided,
41 Submissions.account_id,
42 Submissions.date,
43 Challenges.name.label("challenge_name"),
44 Model.name.label("account_name"),
45 )
46 .filter_by(**filters_by)
47 .filter(*filters)
48 .join(Challenges)
49 .join(Model)
50 .order_by(Submissions.date.desc())
51 .paginate(page=page, per_page=50)
52 )
53
54 args = dict(request.args)
55 args.pop("page", 1)
56
57 return render_template(
58 "admin/submissions.html",
59 submissions=submissions,
60 prev_page=url_for(
61 request.endpoint,
62 submission_type=submission_type,
63 page=submissions.prev_num,
64 **args
65 ),
66 next_page=url_for(
67 request.endpoint,
68 submission_type=submission_type,
69 page=submissions.next_num,
70 **args
71 ),
72 type=submission_type,
73 q=q,
74 field=field,
75 )
76
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/admin/submissions.py b/CTFd/admin/submissions.py
--- a/CTFd/admin/submissions.py
+++ b/CTFd/admin/submissions.py
@@ -33,17 +33,7 @@
Model = get_model()
submissions = (
- Submissions.query.add_columns(
- Submissions.id,
- Submissions.type,
- Submissions.challenge_id,
- Submissions.provided,
- Submissions.account_id,
- Submissions.date,
- Challenges.name.label("challenge_name"),
- Model.name.label("account_name"),
- )
- .filter_by(**filters_by)
+ Submissions.query.filter_by(**filters_by)
.filter(*filters)
.join(Challenges)
.join(Model)
diff --git a/CTFd/constants/config.py b/CTFd/constants/config.py
--- a/CTFd/constants/config.py
+++ b/CTFd/constants/config.py
@@ -13,6 +13,12 @@
REGISTRATION_VISIBILITY = "registration_visibility"
+@JinjaEnum
+class UserModeTypes(str, RawEnum):
+ USERS = "users"
+ TEAMS = "teams"
+
+
@JinjaEnum
class ChallengeVisibilityTypes(str, RawEnum):
PUBLIC = "public"
diff --git a/CTFd/utils/modes/__init__.py b/CTFd/utils/modes/__init__.py
--- a/CTFd/utils/modes/__init__.py
+++ b/CTFd/utils/modes/__init__.py
@@ -3,6 +3,7 @@
from CTFd.models import Teams, Users
from CTFd.utils import get_config
+# TODO: Replace these constants with the UserModeTypes enum
USERS_MODE = "users"
TEAMS_MODE = "teams"
| {"golden_diff": "diff --git a/CTFd/admin/submissions.py b/CTFd/admin/submissions.py\n--- a/CTFd/admin/submissions.py\n+++ b/CTFd/admin/submissions.py\n@@ -33,17 +33,7 @@\n Model = get_model()\n \n submissions = (\n- Submissions.query.add_columns(\n- Submissions.id,\n- Submissions.type,\n- Submissions.challenge_id,\n- Submissions.provided,\n- Submissions.account_id,\n- Submissions.date,\n- Challenges.name.label(\"challenge_name\"),\n- Model.name.label(\"account_name\"),\n- )\n- .filter_by(**filters_by)\n+ Submissions.query.filter_by(**filters_by)\n .filter(*filters)\n .join(Challenges)\n .join(Model)\ndiff --git a/CTFd/constants/config.py b/CTFd/constants/config.py\n--- a/CTFd/constants/config.py\n+++ b/CTFd/constants/config.py\n@@ -13,6 +13,12 @@\n REGISTRATION_VISIBILITY = \"registration_visibility\"\n \n \n+@JinjaEnum\n+class UserModeTypes(str, RawEnum):\n+ USERS = \"users\"\n+ TEAMS = \"teams\"\n+\n+\n @JinjaEnum\n class ChallengeVisibilityTypes(str, RawEnum):\n PUBLIC = \"public\"\ndiff --git a/CTFd/utils/modes/__init__.py b/CTFd/utils/modes/__init__.py\n--- a/CTFd/utils/modes/__init__.py\n+++ b/CTFd/utils/modes/__init__.py\n@@ -3,6 +3,7 @@\n from CTFd.models import Teams, Users\n from CTFd.utils import get_config\n \n+# TODO: Replace these constants with the UserModeTypes enum\n USERS_MODE = \"users\"\n TEAMS_MODE = \"teams\"\n", "issue": "Submissions should link directly to the user that submitted\nSubmissions don't link directly to the user in team mode which means you need to search to see what user submitted for a given team.\r\n\r\n\n", "before_files": [{"content": "import json\n\nfrom flask import url_for\n\nfrom CTFd.constants import JinjaEnum, RawEnum\nfrom CTFd.utils import get_config\n\n\nclass ConfigTypes(str, RawEnum):\n CHALLENGE_VISIBILITY = \"challenge_visibility\"\n SCORE_VISIBILITY = \"score_visibility\"\n ACCOUNT_VISIBILITY = \"account_visibility\"\n REGISTRATION_VISIBILITY = \"registration_visibility\"\n\n\n@JinjaEnum\nclass ChallengeVisibilityTypes(str, RawEnum):\n PUBLIC = \"public\"\n PRIVATE = \"private\"\n ADMINS = \"admins\"\n\n\n@JinjaEnum\nclass ScoreVisibilityTypes(str, RawEnum):\n PUBLIC = \"public\"\n PRIVATE = \"private\"\n HIDDEN = \"hidden\"\n ADMINS = \"admins\"\n\n\n@JinjaEnum\nclass AccountVisibilityTypes(str, RawEnum):\n PUBLIC = \"public\"\n PRIVATE = \"private\"\n ADMINS = \"admins\"\n\n\n@JinjaEnum\nclass RegistrationVisibilityTypes(str, RawEnum):\n PUBLIC = \"public\"\n PRIVATE = \"private\"\n\n\nclass _ConfigsWrapper:\n def __getattr__(self, attr):\n return get_config(attr)\n\n @property\n def ctf_name(self):\n return get_config(\"ctf_name\", default=\"CTFd\")\n\n @property\n def ctf_small_icon(self):\n icon = get_config(\"ctf_small_icon\")\n if icon:\n return url_for(\"views.files\", path=icon)\n return url_for(\"views.themes\", path=\"img/favicon.ico\")\n\n @property\n def theme_header(self):\n from CTFd.utils.helpers import markup\n\n return markup(get_config(\"theme_header\", default=\"\"))\n\n @property\n def theme_footer(self):\n from CTFd.utils.helpers import markup\n\n return markup(get_config(\"theme_footer\", default=\"\"))\n\n @property\n def theme_settings(self):\n return json.loads(get_config(\"theme_settings\", default=\"null\"))\n\n @property\n def tos_or_privacy(self):\n tos = bool(get_config(\"tos_url\") or get_config(\"tos_text\"))\n privacy = bool(get_config(\"privacy_url\") or get_config(\"privacy_text\"))\n return tos or privacy\n\n @property\n def tos_link(self):\n return get_config(\"tos_url\", 
default=url_for(\"views.tos\"))\n\n @property\n def privacy_link(self):\n return get_config(\"privacy_url\", default=url_for(\"views.privacy\"))\n\n\nConfigs = _ConfigsWrapper()\n", "path": "CTFd/constants/config.py"}, {"content": "from flask import url_for\n\nfrom CTFd.models import Teams, Users\nfrom CTFd.utils import get_config\n\nUSERS_MODE = \"users\"\nTEAMS_MODE = \"teams\"\n\n\ndef generate_account_url(account_id, admin=False):\n if get_config(\"user_mode\") == USERS_MODE:\n if admin:\n return url_for(\"admin.users_detail\", user_id=account_id)\n else:\n return url_for(\"users.public\", user_id=account_id)\n elif get_config(\"user_mode\") == TEAMS_MODE:\n if admin:\n return url_for(\"admin.teams_detail\", team_id=account_id)\n else:\n return url_for(\"teams.public\", team_id=account_id)\n\n\ndef get_model():\n if get_config(\"user_mode\") == USERS_MODE:\n return Users\n elif get_config(\"user_mode\") == TEAMS_MODE:\n return Teams\n\n\ndef get_mode_as_word(plural=False, capitalize=False):\n if get_config(\"user_mode\") == USERS_MODE:\n word = \"user\"\n else:\n word = \"team\"\n\n if plural:\n word += \"s\"\n if capitalize:\n word = word.title()\n return word\n", "path": "CTFd/utils/modes/__init__.py"}, {"content": "from flask import render_template, request, url_for\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Challenges, Submissions\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.helpers.models import build_model_filters\nfrom CTFd.utils.modes import get_model\n\n\[email protected](\"/admin/submissions\", defaults={\"submission_type\": None})\[email protected](\"/admin/submissions/<submission_type>\")\n@admins_only\ndef submissions_listing(submission_type):\n filters_by = {}\n if submission_type:\n filters_by[\"type\"] = submission_type\n filters = []\n\n q = request.args.get(\"q\")\n field = request.args.get(\"field\")\n page = abs(request.args.get(\"page\", 1, type=int))\n\n filters = build_model_filters(\n model=Submissions,\n query=q,\n field=field,\n extra_columns={\n \"challenge_name\": Challenges.name,\n \"account_id\": Submissions.account_id,\n },\n )\n\n Model = get_model()\n\n submissions = (\n Submissions.query.add_columns(\n Submissions.id,\n Submissions.type,\n Submissions.challenge_id,\n Submissions.provided,\n Submissions.account_id,\n Submissions.date,\n Challenges.name.label(\"challenge_name\"),\n Model.name.label(\"account_name\"),\n )\n .filter_by(**filters_by)\n .filter(*filters)\n .join(Challenges)\n .join(Model)\n .order_by(Submissions.date.desc())\n .paginate(page=page, per_page=50)\n )\n\n args = dict(request.args)\n args.pop(\"page\", 1)\n\n return render_template(\n \"admin/submissions.html\",\n submissions=submissions,\n prev_page=url_for(\n request.endpoint,\n submission_type=submission_type,\n page=submissions.prev_num,\n **args\n ),\n next_page=url_for(\n request.endpoint,\n submission_type=submission_type,\n page=submissions.next_num,\n **args\n ),\n type=submission_type,\n q=q,\n field=field,\n )\n", "path": "CTFd/admin/submissions.py"}], "after_files": [{"content": "import json\n\nfrom flask import url_for\n\nfrom CTFd.constants import JinjaEnum, RawEnum\nfrom CTFd.utils import get_config\n\n\nclass ConfigTypes(str, RawEnum):\n CHALLENGE_VISIBILITY = \"challenge_visibility\"\n SCORE_VISIBILITY = \"score_visibility\"\n ACCOUNT_VISIBILITY = \"account_visibility\"\n REGISTRATION_VISIBILITY = \"registration_visibility\"\n\n\n@JinjaEnum\nclass UserModeTypes(str, RawEnum):\n USERS = \"users\"\n TEAMS = 
\"teams\"\n\n\n@JinjaEnum\nclass ChallengeVisibilityTypes(str, RawEnum):\n PUBLIC = \"public\"\n PRIVATE = \"private\"\n ADMINS = \"admins\"\n\n\n@JinjaEnum\nclass ScoreVisibilityTypes(str, RawEnum):\n PUBLIC = \"public\"\n PRIVATE = \"private\"\n HIDDEN = \"hidden\"\n ADMINS = \"admins\"\n\n\n@JinjaEnum\nclass AccountVisibilityTypes(str, RawEnum):\n PUBLIC = \"public\"\n PRIVATE = \"private\"\n ADMINS = \"admins\"\n\n\n@JinjaEnum\nclass RegistrationVisibilityTypes(str, RawEnum):\n PUBLIC = \"public\"\n PRIVATE = \"private\"\n\n\nclass _ConfigsWrapper:\n def __getattr__(self, attr):\n return get_config(attr)\n\n @property\n def ctf_name(self):\n return get_config(\"ctf_name\", default=\"CTFd\")\n\n @property\n def ctf_small_icon(self):\n icon = get_config(\"ctf_small_icon\")\n if icon:\n return url_for(\"views.files\", path=icon)\n return url_for(\"views.themes\", path=\"img/favicon.ico\")\n\n @property\n def theme_header(self):\n from CTFd.utils.helpers import markup\n\n return markup(get_config(\"theme_header\", default=\"\"))\n\n @property\n def theme_footer(self):\n from CTFd.utils.helpers import markup\n\n return markup(get_config(\"theme_footer\", default=\"\"))\n\n @property\n def theme_settings(self):\n return json.loads(get_config(\"theme_settings\", default=\"null\"))\n\n @property\n def tos_or_privacy(self):\n tos = bool(get_config(\"tos_url\") or get_config(\"tos_text\"))\n privacy = bool(get_config(\"privacy_url\") or get_config(\"privacy_text\"))\n return tos or privacy\n\n @property\n def tos_link(self):\n return get_config(\"tos_url\", default=url_for(\"views.tos\"))\n\n @property\n def privacy_link(self):\n return get_config(\"privacy_url\", default=url_for(\"views.privacy\"))\n\n\nConfigs = _ConfigsWrapper()\n", "path": "CTFd/constants/config.py"}, {"content": "from flask import url_for\n\nfrom CTFd.models import Teams, Users\nfrom CTFd.utils import get_config\n\n# TODO: Replace these constants with the UserModeTypes enum\nUSERS_MODE = \"users\"\nTEAMS_MODE = \"teams\"\n\n\ndef generate_account_url(account_id, admin=False):\n if get_config(\"user_mode\") == USERS_MODE:\n if admin:\n return url_for(\"admin.users_detail\", user_id=account_id)\n else:\n return url_for(\"users.public\", user_id=account_id)\n elif get_config(\"user_mode\") == TEAMS_MODE:\n if admin:\n return url_for(\"admin.teams_detail\", team_id=account_id)\n else:\n return url_for(\"teams.public\", team_id=account_id)\n\n\ndef get_model():\n if get_config(\"user_mode\") == USERS_MODE:\n return Users\n elif get_config(\"user_mode\") == TEAMS_MODE:\n return Teams\n\n\ndef get_mode_as_word(plural=False, capitalize=False):\n if get_config(\"user_mode\") == USERS_MODE:\n word = \"user\"\n else:\n word = \"team\"\n\n if plural:\n word += \"s\"\n if capitalize:\n word = word.title()\n return word\n", "path": "CTFd/utils/modes/__init__.py"}, {"content": "from flask import render_template, request, url_for\n\nfrom CTFd.admin import admin\nfrom CTFd.models import Challenges, Submissions\nfrom CTFd.utils.decorators import admins_only\nfrom CTFd.utils.helpers.models import build_model_filters\nfrom CTFd.utils.modes import get_model\n\n\[email protected](\"/admin/submissions\", defaults={\"submission_type\": None})\[email protected](\"/admin/submissions/<submission_type>\")\n@admins_only\ndef submissions_listing(submission_type):\n filters_by = {}\n if submission_type:\n filters_by[\"type\"] = submission_type\n filters = []\n\n q = request.args.get(\"q\")\n field = request.args.get(\"field\")\n page = 
abs(request.args.get(\"page\", 1, type=int))\n\n filters = build_model_filters(\n model=Submissions,\n query=q,\n field=field,\n extra_columns={\n \"challenge_name\": Challenges.name,\n \"account_id\": Submissions.account_id,\n },\n )\n\n Model = get_model()\n\n submissions = (\n Submissions.query.filter_by(**filters_by)\n .filter(*filters)\n .join(Challenges)\n .join(Model)\n .order_by(Submissions.date.desc())\n .paginate(page=page, per_page=50)\n )\n\n args = dict(request.args)\n args.pop(\"page\", 1)\n\n return render_template(\n \"admin/submissions.html\",\n submissions=submissions,\n prev_page=url_for(\n request.endpoint,\n submission_type=submission_type,\n page=submissions.prev_num,\n **args\n ),\n next_page=url_for(\n request.endpoint,\n submission_type=submission_type,\n page=submissions.next_num,\n **args\n ),\n type=submission_type,\n q=q,\n field=field,\n )\n", "path": "CTFd/admin/submissions.py"}]} | 1,963 | 391 |
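A short sketch for the CTFd record above (assumption: the `UserModeTypes` enum added by the golden diff is importable from `CTFd.constants.config`). Because the enum subclasses `str`, its members compare equal to the raw strings stored in the config table, so call sites can migrate gradually away from the `USERS_MODE` / `TEAMS_MODE` constants.

```python
# Hypothetical helper showing how the new enum could replace the raw
# USERS_MODE / TEAMS_MODE string constants at a call site.
from CTFd.constants.config import UserModeTypes
from CTFd.utils import get_config


def is_teams_mode() -> bool:
    # UserModeTypes.TEAMS == "teams" holds because the enum mixes in str,
    # so the comparison works against the plain string kept in the config.
    return get_config("user_mode") == UserModeTypes.TEAMS


def account_label() -> str:
    return "team" if is_teams_mode() else "user"
```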
gh_patches_debug_12551 | rasdani/github-patches | git_diff | quantumlib__Cirq-5211 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
cirq-web doesn't support LineQubit
```python
import cirq
import cirq_web
cirq_circuit = cirq.Circuit(cirq.H(cirq.LineQubit(0)))
cirq_web.Circuit3D(cirq_circuit).generate_html_file(
file_name="circuit_viewer.html",
open_in_browser=True,
)
```
results in
```
AttributeError: 'LineQubit' object has no attribute 'row'
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cirq-web/cirq_web/circuits/circuit.py`
Content:
```
1 # Copyright 2021 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import Iterable
15 import cirq
16 from cirq_web import widget
17 from cirq_web.circuits.symbols import (
18 Operation3DSymbol,
19 SymbolResolver,
20 resolve_operation,
21 DEFAULT_SYMBOL_RESOLVERS,
22 )
23
24
25 class Circuit3D(widget.Widget):
26 """Takes cirq.Circuit objects and displays them in 3D."""
27
28 def __init__(
29 self,
30 circuit: cirq.Circuit,
31 resolvers: Iterable[SymbolResolver] = DEFAULT_SYMBOL_RESOLVERS,
32 padding_factor: float = 1,
33 ):
34 """Initializes a Circuit instance.
35
36 Args:
37 circuit: The `cirq.Circuit` to be represented in 3D.
38 resolvers: The symbol resolve for how to show symbols in 3D.
39 padding_factor: The distance between meshes.
40 """
41 super().__init__()
42 self.circuit = circuit
43 self._resolvers = resolvers
44 self.padding_factor = padding_factor
45
46 def get_client_code(self) -> str:
47 # Remove hyphens from the id so that we can use
48 # it as the variable name in TS.
49 # It's important that we assign the circuit to a variable
50 # for animation purposes. Alternatively, there may be ways
51 # to select/manipulate elements on the screen from three.js
52 stripped_id = self.id.replace('-', '')
53 moments = len(self.circuit.moments)
54 self.serialized_circuit = self._serialize_circuit()
55
56 return f"""
57 <button id="camera-reset">Reset Camera</button>
58 <button id="camera-toggle">Toggle Camera Type</button>
59 <script>
60 let viz_{stripped_id} = createGridCircuit({self.serialized_circuit}, {moments}, "{self.id}", {self.padding_factor});
61
62 document.getElementById("camera-reset").addEventListener('click', () => {{
63 viz_{stripped_id}.scene.setCameraAndControls(viz_{stripped_id}.circuit);
64 }});
65
66 document.getElementById("camera-toggle").addEventListener('click', () => {{
67 viz_{stripped_id}.scene.toggleCamera(viz_{stripped_id}.circuit);
68 }});
69 </script>
70 """
71
72 def get_widget_bundle_name(self) -> str:
73 return 'circuit.bundle.js'
74
75 def _serialize_circuit(self) -> str:
76 args = []
77 moments = self.circuit.moments
78 for moment_id, moment in enumerate(moments):
79 for item in moment:
80 symbol = self._build_3D_symbol(item, moment_id)
81 args.append(symbol.to_typescript())
82
83 argument_str = ','.join(str(item) for item in args)
84 return f'[{argument_str}]'
85
86 def _build_3D_symbol(self, operation, moment) -> Operation3DSymbol:
87 symbol_info = resolve_operation(operation, self._resolvers)
88 location_info = []
89 for qubit in operation.qubits:
90 location_info.append({'row': qubit.row, 'col': qubit.col})
91 return Operation3DSymbol(symbol_info.labels, location_info, symbol_info.colors, moment)
92
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/cirq-web/cirq_web/circuits/circuit.py b/cirq-web/cirq_web/circuits/circuit.py
--- a/cirq-web/cirq_web/circuits/circuit.py
+++ b/cirq-web/cirq_web/circuits/circuit.py
@@ -87,5 +87,10 @@
symbol_info = resolve_operation(operation, self._resolvers)
location_info = []
for qubit in operation.qubits:
- location_info.append({'row': qubit.row, 'col': qubit.col})
+ if isinstance(qubit, cirq.GridQubit):
+ location_info.append({'row': qubit.row, 'col': qubit.col})
+ elif isinstance(qubit, cirq.LineQubit):
+ location_info.append({'row': qubit.x, 'col': 0})
+ else:
+ raise ValueError('Unsupported qubit type')
return Operation3DSymbol(symbol_info.labels, location_info, symbol_info.colors, moment)
| {"golden_diff": "diff --git a/cirq-web/cirq_web/circuits/circuit.py b/cirq-web/cirq_web/circuits/circuit.py\n--- a/cirq-web/cirq_web/circuits/circuit.py\n+++ b/cirq-web/cirq_web/circuits/circuit.py\n@@ -87,5 +87,10 @@\n symbol_info = resolve_operation(operation, self._resolvers)\n location_info = []\n for qubit in operation.qubits:\n- location_info.append({'row': qubit.row, 'col': qubit.col})\n+ if isinstance(qubit, cirq.GridQubit):\n+ location_info.append({'row': qubit.row, 'col': qubit.col})\n+ elif isinstance(qubit, cirq.LineQubit):\n+ location_info.append({'row': qubit.x, 'col': 0})\n+ else:\n+ raise ValueError('Unsupported qubit type')\n return Operation3DSymbol(symbol_info.labels, location_info, symbol_info.colors, moment)\n", "issue": "cirq-web doesn't support LineQubit\n```python\r\nimport cirq\r\nimport cirq_web\r\n\r\ncirq_circuit = cirq.Circuit(cirq.H(cirq.LineQubit(0)))\r\ncirq_web.Circuit3D(cirq_circuit).generate_html_file(\r\n file_name=\"circuit_viewer.html\",\r\n open_in_browser=True,\r\n)\r\n```\r\n\r\nresults in\r\n\r\n```\r\nAttributeError: 'LineQubit' object has no attribute 'row'\r\n```\n", "before_files": [{"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Iterable\nimport cirq\nfrom cirq_web import widget\nfrom cirq_web.circuits.symbols import (\n Operation3DSymbol,\n SymbolResolver,\n resolve_operation,\n DEFAULT_SYMBOL_RESOLVERS,\n)\n\n\nclass Circuit3D(widget.Widget):\n \"\"\"Takes cirq.Circuit objects and displays them in 3D.\"\"\"\n\n def __init__(\n self,\n circuit: cirq.Circuit,\n resolvers: Iterable[SymbolResolver] = DEFAULT_SYMBOL_RESOLVERS,\n padding_factor: float = 1,\n ):\n \"\"\"Initializes a Circuit instance.\n\n Args:\n circuit: The `cirq.Circuit` to be represented in 3D.\n resolvers: The symbol resolve for how to show symbols in 3D.\n padding_factor: The distance between meshes.\n \"\"\"\n super().__init__()\n self.circuit = circuit\n self._resolvers = resolvers\n self.padding_factor = padding_factor\n\n def get_client_code(self) -> str:\n # Remove hyphens from the id so that we can use\n # it as the variable name in TS.\n # It's important that we assign the circuit to a variable\n # for animation purposes. 
Alternatively, there may be ways\n # to select/manipulate elements on the screen from three.js\n stripped_id = self.id.replace('-', '')\n moments = len(self.circuit.moments)\n self.serialized_circuit = self._serialize_circuit()\n\n return f\"\"\"\n <button id=\"camera-reset\">Reset Camera</button>\n <button id=\"camera-toggle\">Toggle Camera Type</button>\n <script>\n let viz_{stripped_id} = createGridCircuit({self.serialized_circuit}, {moments}, \"{self.id}\", {self.padding_factor});\n\n document.getElementById(\"camera-reset\").addEventListener('click', () => {{\n viz_{stripped_id}.scene.setCameraAndControls(viz_{stripped_id}.circuit);\n }});\n\n document.getElementById(\"camera-toggle\").addEventListener('click', () => {{\n viz_{stripped_id}.scene.toggleCamera(viz_{stripped_id}.circuit);\n }});\n </script>\n \"\"\"\n\n def get_widget_bundle_name(self) -> str:\n return 'circuit.bundle.js'\n\n def _serialize_circuit(self) -> str:\n args = []\n moments = self.circuit.moments\n for moment_id, moment in enumerate(moments):\n for item in moment:\n symbol = self._build_3D_symbol(item, moment_id)\n args.append(symbol.to_typescript())\n\n argument_str = ','.join(str(item) for item in args)\n return f'[{argument_str}]'\n\n def _build_3D_symbol(self, operation, moment) -> Operation3DSymbol:\n symbol_info = resolve_operation(operation, self._resolvers)\n location_info = []\n for qubit in operation.qubits:\n location_info.append({'row': qubit.row, 'col': qubit.col})\n return Operation3DSymbol(symbol_info.labels, location_info, symbol_info.colors, moment)\n", "path": "cirq-web/cirq_web/circuits/circuit.py"}], "after_files": [{"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import Iterable\nimport cirq\nfrom cirq_web import widget\nfrom cirq_web.circuits.symbols import (\n Operation3DSymbol,\n SymbolResolver,\n resolve_operation,\n DEFAULT_SYMBOL_RESOLVERS,\n)\n\n\nclass Circuit3D(widget.Widget):\n \"\"\"Takes cirq.Circuit objects and displays them in 3D.\"\"\"\n\n def __init__(\n self,\n circuit: cirq.Circuit,\n resolvers: Iterable[SymbolResolver] = DEFAULT_SYMBOL_RESOLVERS,\n padding_factor: float = 1,\n ):\n \"\"\"Initializes a Circuit instance.\n\n Args:\n circuit: The `cirq.Circuit` to be represented in 3D.\n resolvers: The symbol resolve for how to show symbols in 3D.\n padding_factor: The distance between meshes.\n \"\"\"\n super().__init__()\n self.circuit = circuit\n self._resolvers = resolvers\n self.padding_factor = padding_factor\n\n def get_client_code(self) -> str:\n # Remove hyphens from the id so that we can use\n # it as the variable name in TS.\n # It's important that we assign the circuit to a variable\n # for animation purposes. 
Alternatively, there may be ways\n # to select/manipulate elements on the screen from three.js\n stripped_id = self.id.replace('-', '')\n moments = len(self.circuit.moments)\n self.serialized_circuit = self._serialize_circuit()\n\n return f\"\"\"\n <button id=\"camera-reset\">Reset Camera</button>\n <button id=\"camera-toggle\">Toggle Camera Type</button>\n <script>\n let viz_{stripped_id} = createGridCircuit({self.serialized_circuit}, {moments}, \"{self.id}\", {self.padding_factor});\n\n document.getElementById(\"camera-reset\").addEventListener('click', () => {{\n viz_{stripped_id}.scene.setCameraAndControls(viz_{stripped_id}.circuit);\n }});\n\n document.getElementById(\"camera-toggle\").addEventListener('click', () => {{\n viz_{stripped_id}.scene.toggleCamera(viz_{stripped_id}.circuit);\n }});\n </script>\n \"\"\"\n\n def get_widget_bundle_name(self) -> str:\n return 'circuit.bundle.js'\n\n def _serialize_circuit(self) -> str:\n args = []\n moments = self.circuit.moments\n for moment_id, moment in enumerate(moments):\n for item in moment:\n symbol = self._build_3D_symbol(item, moment_id)\n args.append(symbol.to_typescript())\n\n argument_str = ','.join(str(item) for item in args)\n return f'[{argument_str}]'\n\n def _build_3D_symbol(self, operation, moment) -> Operation3DSymbol:\n symbol_info = resolve_operation(operation, self._resolvers)\n location_info = []\n for qubit in operation.qubits:\n if isinstance(qubit, cirq.GridQubit):\n location_info.append({'row': qubit.row, 'col': qubit.col})\n elif isinstance(qubit, cirq.LineQubit):\n location_info.append({'row': qubit.x, 'col': 0})\n else:\n raise ValueError('Unsupported qubit type')\n return Operation3DSymbol(symbol_info.labels, location_info, symbol_info.colors, moment)\n", "path": "cirq-web/cirq_web/circuits/circuit.py"}]} | 1,339 | 215 |
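For the cirq-web record above, a standalone sketch of the qubit-dispatch logic introduced by the golden diff, extracted here as a hypothetical helper so it can be tried outside the 3D widget:

```python
import cirq


def qubit_location(qubit: cirq.Qid) -> dict:
    # Mirrors the patched Circuit3D._build_3D_symbol: GridQubits keep their
    # (row, col); LineQubits are laid out in a single column, using the
    # integer index as the row; anything else is rejected explicitly.
    if isinstance(qubit, cirq.GridQubit):
        return {"row": qubit.row, "col": qubit.col}
    if isinstance(qubit, cirq.LineQubit):
        return {"row": qubit.x, "col": 0}
    raise ValueError("Unsupported qubit type")


print(qubit_location(cirq.LineQubit(0)))     # {'row': 0, 'col': 0}
print(qubit_location(cirq.GridQubit(1, 2)))  # {'row': 1, 'col': 2}
```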
gh_patches_debug_27053 | rasdani/github-patches | git_diff | cloud-custodian__cloud-custodian-5715 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
aws.cloudtrail status IsLogging fails with KeyError: 'c7n:TrailStatus'
**Describe the bug**
cloudtrail status / IsLogging returns key error KeyError: 'c7n:TrailStatus'
https://cloudcustodian.io/docs/aws/resources/cloudtrail.html#status indicates this should work.
**To Reproduce**
Authenticate to an account that has both an Organizational trail and local trail.
Policy file:
```yaml
- name: awslogs-cloudtrail
resource: aws.cloudtrail
filters:
- type: status
key: IsLogging
value: True
```
Output is:
```
2020-05-05 09:03:16,332: custodian.commands:ERROR Error while executing policy awslogs-cloudtrail, continuing
Traceback (most recent call last):
File "/usr/local/lib/python3.7/site-packages/c7n/commands.py", line 281, in run
policy()
File "/usr/local/lib/python3.7/site-packages/c7n/policy.py", line 1062, in __call__
resources = mode.run()
File "/usr/local/lib/python3.7/site-packages/c7n/policy.py", line 291, in run
resources = self.policy.resource_manager.resources()
File "/usr/local/lib/python3.7/site-packages/c7n/query.py", line 466, in resources
resources = self.filter_resources(resources)
File "/usr/local/lib/python3.7/site-packages/c7n/manager.py", line 109, in filter_resources
resources = f.process(resources, event)
File "/usr/local/lib/python3.7/site-packages/c7n/resources/cloudtrail.py", line 122, in process
return super(Status, self).process(resources)
File "/usr/local/lib/python3.7/site-packages/c7n/filters/core.py", line 499, in process
return super(ValueFilter, self).process(resources, event)
File "/usr/local/lib/python3.7/site-packages/c7n/filters/core.py", line 197, in process
return list(filter(self, resources))
File "/usr/local/lib/python3.7/site-packages/c7n/resources/cloudtrail.py", line 125, in __call__
return self.match(r['c7n:TrailStatus'])
KeyError: 'c7n:TrailStatus'
```
**Expected behavior**
Only CloudTrails that are enabled are returned, no error.
**Background (please complete the following information):**
- OS: OSX 10.14.6
- Python Version: Python 3.7.4
- Custodian Version: 0.9.1
- Cloud Provider: aws
- Policy:
```yaml
policies:
- name: awslogs-cloudtrail
resource: cloudtrail
filters:
- type: status
key: IsLogging
value: True
```
- Traceback: [if applicable, please exclude sensitive/account information]
```
Custodian: 0.9.1
Python: 3.7.4 (default, Sep 7 2019, 18:27:02)
[Clang 10.0.1 (clang-1001.0.46.4)]
Platform: posix.uname_result(sysname='Darwin', nodename='xxx', release='18.7.0', version='Darwin Kernel Version 18.7.0: Tue Aug 20 16:57:14 PDT 2019; root:xnu-4903.271.2~2/RELEASE_X86_64', machine='x86_64')
Using venv: False
Docker: False
Installed:
argcomplete==1.11.1
attrs==19.3.0
boto3==1.12.47
botocore==1.15.47
docutils==0.15.2
importlib-metadata==1.6.0
jmespath==0.9.5
jsonschema==3.2.0
pyrsistent==0.16.0
python-dateutil==2.8.1
pyyaml==5.3.1
s3transfer==0.3.3
setuptools==41.0.1
six==1.14.0
tabulate==0.8.7
urllib3==1.25.9
zipp==3.1.0
```
Under --debug, self has:
```
(Pdb) p dir(self)
['__call__', '__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_validate_resource_count', '_validate_value_regex', 'annotate', 'annotation_key', 'data', 'executor_factory', 'expr', 'get_block_operator', 'get_permissions', 'get_resource_value', 'log', 'manager', 'match', 'merge_annotation', 'metrics', 'op', 'permissions', 'process', 'process_value_type', 'required_keys', 'schema', 'schema_alias', 'type', 'type_aliases', 'v', 'validate', 'vtype']
```
Adding this filter _before_ the status filter works around the issue:
```yaml
- type: is-shadow
state: False
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `c7n/resources/cloudtrail.py`
Content:
```
1 # Copyright 2017-2019 Capital One Services, LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 import logging
15
16 from c7n.actions import Action, BaseAction
17 from c7n.exceptions import PolicyValidationError
18 from c7n.filters import ValueFilter, Filter
19 from c7n.manager import resources
20 from c7n.tags import universal_augment
21 from c7n.query import ConfigSource, DescribeSource, QueryResourceManager, TypeInfo
22 from c7n.utils import local_session, type_schema
23
24 from .aws import shape_validate, Arn
25
26 log = logging.getLogger('c7n.resources.cloudtrail')
27
28
29 class DescribeTrail(DescribeSource):
30
31 def augment(self, resources):
32 return universal_augment(self.manager, resources)
33
34
35 @resources.register('cloudtrail')
36 class CloudTrail(QueryResourceManager):
37
38 class resource_type(TypeInfo):
39 service = 'cloudtrail'
40 enum_spec = ('describe_trails', 'trailList', None)
41 filter_name = 'trailNameList'
42 filter_type = 'list'
43 arn = id = 'TrailARN'
44 name = 'Name'
45 cfn_type = config_type = "AWS::CloudTrail::Trail"
46 universal_taggable = object()
47
48 source_mapping = {
49 'describe': DescribeTrail,
50 'config': ConfigSource
51 }
52
53
54 @CloudTrail.filter_registry.register('is-shadow')
55 class IsShadow(Filter):
56 """Identify shadow trails (secondary copies), shadow trails
57 can't be modified directly, the origin trail needs to be modified.
58
59 Shadow trails are created for multi-region trails as well for
60 organizational trails.
61 """
62 schema = type_schema('is-shadow', state={'type': 'boolean'})
63 permissions = ('cloudtrail:DescribeTrails',)
64 embedded = False
65
66 def process(self, resources, event=None):
67 rcount = len(resources)
68 trails = [t for t in resources if (self.is_shadow(t) == self.data.get('state', True))]
69 if len(trails) != rcount and self.embedded:
70 self.log.info("implicitly filtering shadow trails %d -> %d",
71 rcount, len(trails))
72 return trails
73
74 def is_shadow(self, t):
75 if t.get('IsOrganizationTrail') and self.manager.config.account_id not in t['TrailARN']:
76 return True
77 if t.get('IsMultiRegionTrail') and t['HomeRegion'] != self.manager.config.region:
78 return True
79 return False
80
81
82 @CloudTrail.filter_registry.register('status')
83 class Status(ValueFilter):
84 """Filter a cloudtrail by its status.
85
86 :Example:
87
88 .. code-block:: yaml
89
90 policies:
91 - name: cloudtrail-check-status
92 resource: aws.cloudtrail
93 filters:
94 - type: status
95 key: IsLogging
96 value: False
97 """
98
99 schema = type_schema('status', rinherit=ValueFilter.schema)
100 schema_alias = False
101 permissions = ('cloudtrail:GetTrailStatus',)
102 annotation_key = 'c7n:TrailStatus'
103
104 def process(self, resources, event=None):
105 for r in resources:
106 region = self.manager.config.region
107 trail_arn = Arn.parse(r['TrailARN'])
108
109 if (r.get('IsOrganizationTrail') and
110 self.manager.config.account_id != trail_arn.account_id):
111 continue
112 if r.get('HomeRegion') and r['HomeRegion'] != region:
113 region = trail_arn.region
114 if self.annotation_key in r:
115 continue
116 client = local_session(self.manager.session_factory).client(
117 'cloudtrail', region_name=region)
118 status = client.get_trail_status(Name=r['Name'])
119 status.pop('ResponseMetadata')
120 r[self.annotation_key] = status
121
122 return super(Status, self).process(resources)
123
124 def __call__(self, r):
125 return self.match(r['c7n:TrailStatus'])
126
127
128 @CloudTrail.action_registry.register('update-trail')
129 class UpdateTrail(Action):
130 """Update trail attributes.
131
132 :Example:
133
134 .. code-block:: yaml
135
136 policies:
137 - name: cloudtrail-set-log
138 resource: aws.cloudtrail
139 filters:
140 - or:
141 - KmsKeyId: empty
142 - LogFileValidationEnabled: false
143 actions:
144 - type: update-trail
145 attributes:
146 KmsKeyId: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef
147 EnableLogFileValidation: true
148 """
149 schema = type_schema(
150 'update-trail',
151 attributes={'type': 'object'},
152 required=('attributes',))
153 shape = 'UpdateTrailRequest'
154 permissions = ('cloudtrail:UpdateTrail',)
155
156 def validate(self):
157 attrs = dict(self.data['attributes'])
158 if 'Name' in attrs:
159 raise PolicyValidationError(
160 "Can't include Name in update-trail action")
161 attrs['Name'] = 'PolicyValidation'
162 return shape_validate(
163 attrs,
164 self.shape,
165 self.manager.resource_type.service)
166
167 def process(self, resources):
168 client = local_session(self.manager.session_factory).client('cloudtrail')
169 shadow_check = IsShadow({'state': False}, self.manager)
170 shadow_check.embedded = True
171 resources = shadow_check.process(resources)
172
173 for r in resources:
174 client.update_trail(
175 Name=r['Name'],
176 **self.data['attributes'])
177
178
179 @CloudTrail.action_registry.register('set-logging')
180 class SetLogging(Action):
181 """Set the logging state of a trail
182
183 :Example:
184
185 .. code-block:: yaml
186
187 policies:
188 - name: cloudtrail-set-active
189 resource: aws.cloudtrail
190 filters:
191 - type: status
192 key: IsLogging
193 value: False
194 actions:
195 - type: set-logging
196 enabled: True
197 """
198 schema = type_schema(
199 'set-logging', enabled={'type': 'boolean'})
200
201 def get_permissions(self):
202 enable = self.data.get('enabled', True)
203 if enable is True:
204 return ('cloudtrail:StartLogging',)
205 else:
206 return ('cloudtrail:StopLogging',)
207
208 def process(self, resources):
209 client = local_session(self.manager.session_factory).client('cloudtrail')
210 shadow_check = IsShadow({'state': False}, self.manager)
211 shadow_check.embedded = True
212 resources = shadow_check.process(resources)
213 enable = self.data.get('enabled', True)
214
215 for r in resources:
216 if enable:
217 client.start_logging(Name=r['Name'])
218 else:
219 client.stop_logging(Name=r['Name'])
220
221
222 @CloudTrail.action_registry.register('delete')
223 class DeleteTrail(BaseAction):
224 """ Delete a cloud trail
225
226 :example:
227
228 .. code-block:: yaml
229
230 policies:
231 - name: delete-cloudtrail
232 resource: aws.cloudtrail
233 filters:
234 - type: value
235 key: Name
236 value: delete-me
237 op: eq
238 actions:
239 - type: delete
240 """
241
242 schema = type_schema('delete')
243 permissions = ('cloudtrail:DeleteTrail',)
244
245 def process(self, resources):
246 client = local_session(self.manager.session_factory).client('cloudtrail')
247 shadow_check = IsShadow({'state': False}, self.manager)
248 shadow_check.embedded = True
249 resources = shadow_check.process(resources)
250 for r in resources:
251 try:
252 client.delete_trail(Name=r['Name'])
253 except client.exceptions.TrailNotFoundException:
254 continue
255
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/c7n/resources/cloudtrail.py b/c7n/resources/cloudtrail.py
--- a/c7n/resources/cloudtrail.py
+++ b/c7n/resources/cloudtrail.py
@@ -102,12 +102,16 @@
annotation_key = 'c7n:TrailStatus'
def process(self, resources, event=None):
+
+ non_account_trails = set()
+
for r in resources:
region = self.manager.config.region
trail_arn = Arn.parse(r['TrailARN'])
if (r.get('IsOrganizationTrail') and
self.manager.config.account_id != trail_arn.account_id):
+ non_account_trails.add(r['TrailARN'])
continue
if r.get('HomeRegion') and r['HomeRegion'] != region:
region = trail_arn.region
@@ -119,7 +123,12 @@
status.pop('ResponseMetadata')
r[self.annotation_key] = status
- return super(Status, self).process(resources)
+ if non_account_trails:
+ self.log.warning(
+ 'found %d org cloud trail from different account that cant be processed',
+ len(non_account_trails))
+ return super(Status, self).process([
+ r for r in resources if r['TrailARN'] not in non_account_trails])
def __call__(self, r):
return self.match(r['c7n:TrailStatus'])
| {"golden_diff": "diff --git a/c7n/resources/cloudtrail.py b/c7n/resources/cloudtrail.py\n--- a/c7n/resources/cloudtrail.py\n+++ b/c7n/resources/cloudtrail.py\n@@ -102,12 +102,16 @@\n annotation_key = 'c7n:TrailStatus'\n \n def process(self, resources, event=None):\n+\n+ non_account_trails = set()\n+\n for r in resources:\n region = self.manager.config.region\n trail_arn = Arn.parse(r['TrailARN'])\n \n if (r.get('IsOrganizationTrail') and\n self.manager.config.account_id != trail_arn.account_id):\n+ non_account_trails.add(r['TrailARN'])\n continue\n if r.get('HomeRegion') and r['HomeRegion'] != region:\n region = trail_arn.region\n@@ -119,7 +123,12 @@\n status.pop('ResponseMetadata')\n r[self.annotation_key] = status\n \n- return super(Status, self).process(resources)\n+ if non_account_trails:\n+ self.log.warning(\n+ 'found %d org cloud trail from different account that cant be processed',\n+ len(non_account_trails))\n+ return super(Status, self).process([\n+ r for r in resources if r['TrailARN'] not in non_account_trails])\n \n def __call__(self, r):\n return self.match(r['c7n:TrailStatus'])\n", "issue": "aws.cloudtrail status IsLogging fails with KeyError: 'c7n:TrailStatus'\n**Describe the bug**\r\ncloudtrail status / IsLogging returns key error KeyError: 'c7n:TrailStatus'\r\n\r\nhttps://cloudcustodian.io/docs/aws/resources/cloudtrail.html#status indicates this should work.\r\n\r\n**To Reproduce**\r\nAuthenticate to an account that has both an Organizational trail and local trail.\r\n\r\nPolicy file:\r\n```yaml\r\n - name: awslogs-cloudtrail\r\n resource: aws.cloudtrail\r\n filters:\r\n - type: status\r\n key: IsLogging\r\n value: True\r\n```\r\n\r\nOutput is:\r\n```\r\n2020-05-05 09:03:16,332: custodian.commands:ERROR Error while executing policy awslogs-cloudtrail, continuing\r\nTraceback (most recent call last):\r\n File \"/usr/local/lib/python3.7/site-packages/c7n/commands.py\", line 281, in run\r\n policy()\r\n File \"/usr/local/lib/python3.7/site-packages/c7n/policy.py\", line 1062, in __call__\r\n resources = mode.run()\r\n File \"/usr/local/lib/python3.7/site-packages/c7n/policy.py\", line 291, in run\r\n resources = self.policy.resource_manager.resources()\r\n File \"/usr/local/lib/python3.7/site-packages/c7n/query.py\", line 466, in resources\r\n resources = self.filter_resources(resources)\r\n File \"/usr/local/lib/python3.7/site-packages/c7n/manager.py\", line 109, in filter_resources\r\n resources = f.process(resources, event)\r\n File \"/usr/local/lib/python3.7/site-packages/c7n/resources/cloudtrail.py\", line 122, in process\r\n return super(Status, self).process(resources)\r\n File \"/usr/local/lib/python3.7/site-packages/c7n/filters/core.py\", line 499, in process\r\n return super(ValueFilter, self).process(resources, event)\r\n File \"/usr/local/lib/python3.7/site-packages/c7n/filters/core.py\", line 197, in process\r\n return list(filter(self, resources))\r\n File \"/usr/local/lib/python3.7/site-packages/c7n/resources/cloudtrail.py\", line 125, in __call__\r\n return self.match(r['c7n:TrailStatus'])\r\nKeyError: 'c7n:TrailStatus'\r\n```\r\n\r\n**Expected behavior**\r\nOnly CloudTrails that are enabled are returned, no error.\r\n\r\n\r\n**Background (please complete the following information):**\r\n - OS: OSX 10.14.6\r\n - Python Version: Python 3.7.4\r\n - Custodian Version: 0.9.1\r\n - Cloud Provider: aws\r\n - Policy:\r\n```yaml\r\npolicies: \r\n - name: awslogs-cloudtrail\r\n resource: cloudtrail\r\n filters:\r\n - type: status\r\n key: IsLogging\r\n value: 
True\r\n```\r\n - Traceback: [if applicable, please exclude sensitive/account information]\r\n ```\r\nCustodian: 0.9.1\r\nPython: 3.7.4 (default, Sep 7 2019, 18:27:02)\r\n [Clang 10.0.1 (clang-1001.0.46.4)]\r\nPlatform: posix.uname_result(sysname='Darwin', nodename='xxx', release='18.7.0', version='Darwin Kernel Version 18.7.0: Tue Aug 20 16:57:14 PDT 2019; root:xnu-4903.271.2~2/RELEASE_X86_64', machine='x86_64')\r\nUsing venv: False\r\nDocker: False\r\nInstalled:\r\n\r\nargcomplete==1.11.1\r\nattrs==19.3.0\r\nboto3==1.12.47\r\nbotocore==1.15.47\r\ndocutils==0.15.2\r\nimportlib-metadata==1.6.0\r\njmespath==0.9.5\r\njsonschema==3.2.0\r\npyrsistent==0.16.0\r\npython-dateutil==2.8.1\r\npyyaml==5.3.1\r\ns3transfer==0.3.3\r\nsetuptools==41.0.1\r\nsix==1.14.0\r\ntabulate==0.8.7\r\nurllib3==1.25.9\r\nzipp==3.1.0\r\n```\r\n\r\nUnder --debug, self has:\r\n```\r\n(Pdb) p dir(self)\r\n['__call__', '__class__', '__delattr__', '__dict__', '__dir__', '__doc__', '__eq__', '__format__', '__ge__', '__getattribute__', '__gt__', '__hash__', '__init__', '__init_subclass__', '__le__', '__lt__', '__module__', '__ne__', '__new__', '__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__', '__str__', '__subclasshook__', '__weakref__', '_validate_resource_count', '_validate_value_regex', 'annotate', 'annotation_key', 'data', 'executor_factory', 'expr', 'get_block_operator', 'get_permissions', 'get_resource_value', 'log', 'manager', 'match', 'merge_annotation', 'metrics', 'op', 'permissions', 'process', 'process_value_type', 'required_keys', 'schema', 'schema_alias', 'type', 'type_aliases', 'v', 'validate', 'vtype']\r\n```\r\n\r\nAdding this filter _before_ the status filter works around the issue:\r\n```yaml\r\n - type: is-shadow\r\n state: False\r\n```\n", "before_files": [{"content": "# Copyright 2017-2019 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\n\nfrom c7n.actions import Action, BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import ValueFilter, Filter\nfrom c7n.manager import resources\nfrom c7n.tags import universal_augment\nfrom c7n.query import ConfigSource, DescribeSource, QueryResourceManager, TypeInfo\nfrom c7n.utils import local_session, type_schema\n\nfrom .aws import shape_validate, Arn\n\nlog = logging.getLogger('c7n.resources.cloudtrail')\n\n\nclass DescribeTrail(DescribeSource):\n\n def augment(self, resources):\n return universal_augment(self.manager, resources)\n\n\[email protected]('cloudtrail')\nclass CloudTrail(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'cloudtrail'\n enum_spec = ('describe_trails', 'trailList', None)\n filter_name = 'trailNameList'\n filter_type = 'list'\n arn = id = 'TrailARN'\n name = 'Name'\n cfn_type = config_type = \"AWS::CloudTrail::Trail\"\n universal_taggable = object()\n\n source_mapping = {\n 'describe': DescribeTrail,\n 'config': ConfigSource\n }\n\n\[email protected]_registry.register('is-shadow')\nclass IsShadow(Filter):\n \"\"\"Identify shadow 
trails (secondary copies), shadow trails\n can't be modified directly, the origin trail needs to be modified.\n\n Shadow trails are created for multi-region trails as well for\n organizational trails.\n \"\"\"\n schema = type_schema('is-shadow', state={'type': 'boolean'})\n permissions = ('cloudtrail:DescribeTrails',)\n embedded = False\n\n def process(self, resources, event=None):\n rcount = len(resources)\n trails = [t for t in resources if (self.is_shadow(t) == self.data.get('state', True))]\n if len(trails) != rcount and self.embedded:\n self.log.info(\"implicitly filtering shadow trails %d -> %d\",\n rcount, len(trails))\n return trails\n\n def is_shadow(self, t):\n if t.get('IsOrganizationTrail') and self.manager.config.account_id not in t['TrailARN']:\n return True\n if t.get('IsMultiRegionTrail') and t['HomeRegion'] != self.manager.config.region:\n return True\n return False\n\n\[email protected]_registry.register('status')\nclass Status(ValueFilter):\n \"\"\"Filter a cloudtrail by its status.\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-check-status\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n \"\"\"\n\n schema = type_schema('status', rinherit=ValueFilter.schema)\n schema_alias = False\n permissions = ('cloudtrail:GetTrailStatus',)\n annotation_key = 'c7n:TrailStatus'\n\n def process(self, resources, event=None):\n for r in resources:\n region = self.manager.config.region\n trail_arn = Arn.parse(r['TrailARN'])\n\n if (r.get('IsOrganizationTrail') and\n self.manager.config.account_id != trail_arn.account_id):\n continue\n if r.get('HomeRegion') and r['HomeRegion'] != region:\n region = trail_arn.region\n if self.annotation_key in r:\n continue\n client = local_session(self.manager.session_factory).client(\n 'cloudtrail', region_name=region)\n status = client.get_trail_status(Name=r['Name'])\n status.pop('ResponseMetadata')\n r[self.annotation_key] = status\n\n return super(Status, self).process(resources)\n\n def __call__(self, r):\n return self.match(r['c7n:TrailStatus'])\n\n\[email protected]_registry.register('update-trail')\nclass UpdateTrail(Action):\n \"\"\"Update trail attributes.\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-set-log\n resource: aws.cloudtrail\n filters:\n - or:\n - KmsKeyId: empty\n - LogFileValidationEnabled: false\n actions:\n - type: update-trail\n attributes:\n KmsKeyId: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef\n EnableLogFileValidation: true\n \"\"\"\n schema = type_schema(\n 'update-trail',\n attributes={'type': 'object'},\n required=('attributes',))\n shape = 'UpdateTrailRequest'\n permissions = ('cloudtrail:UpdateTrail',)\n\n def validate(self):\n attrs = dict(self.data['attributes'])\n if 'Name' in attrs:\n raise PolicyValidationError(\n \"Can't include Name in update-trail action\")\n attrs['Name'] = 'PolicyValidation'\n return shape_validate(\n attrs,\n self.shape,\n self.manager.resource_type.service)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n\n for r in resources:\n client.update_trail(\n Name=r['Name'],\n **self.data['attributes'])\n\n\[email protected]_registry.register('set-logging')\nclass SetLogging(Action):\n \"\"\"Set the logging state of a trail\n\n :Example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudtrail-set-active\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n actions:\n - type: set-logging\n enabled: True\n \"\"\"\n schema = type_schema(\n 'set-logging', enabled={'type': 'boolean'})\n\n def get_permissions(self):\n enable = self.data.get('enabled', True)\n if enable is True:\n return ('cloudtrail:StartLogging',)\n else:\n return ('cloudtrail:StopLogging',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n enable = self.data.get('enabled', True)\n\n for r in resources:\n if enable:\n client.start_logging(Name=r['Name'])\n else:\n client.stop_logging(Name=r['Name'])\n\n\[email protected]_registry.register('delete')\nclass DeleteTrail(BaseAction):\n \"\"\" Delete a cloud trail\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: delete-cloudtrail\n resource: aws.cloudtrail\n filters:\n - type: value\n key: Name\n value: delete-me\n op: eq\n actions:\n - type: delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('cloudtrail:DeleteTrail',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n for r in resources:\n try:\n client.delete_trail(Name=r['Name'])\n except client.exceptions.TrailNotFoundException:\n continue\n", "path": "c7n/resources/cloudtrail.py"}], "after_files": [{"content": "# Copyright 2017-2019 Capital One Services, LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport logging\n\nfrom c7n.actions import Action, BaseAction\nfrom c7n.exceptions import PolicyValidationError\nfrom c7n.filters import ValueFilter, Filter\nfrom c7n.manager import resources\nfrom c7n.tags import universal_augment\nfrom c7n.query import ConfigSource, DescribeSource, QueryResourceManager, TypeInfo\nfrom c7n.utils import local_session, type_schema\n\nfrom .aws import shape_validate, Arn\n\nlog = logging.getLogger('c7n.resources.cloudtrail')\n\n\nclass DescribeTrail(DescribeSource):\n\n def augment(self, resources):\n return universal_augment(self.manager, resources)\n\n\[email protected]('cloudtrail')\nclass CloudTrail(QueryResourceManager):\n\n class resource_type(TypeInfo):\n service = 'cloudtrail'\n enum_spec = ('describe_trails', 'trailList', None)\n filter_name = 'trailNameList'\n filter_type = 'list'\n arn = id = 'TrailARN'\n name = 'Name'\n cfn_type = config_type = \"AWS::CloudTrail::Trail\"\n universal_taggable = object()\n\n source_mapping = {\n 'describe': DescribeTrail,\n 'config': ConfigSource\n }\n\n\[email protected]_registry.register('is-shadow')\nclass IsShadow(Filter):\n \"\"\"Identify shadow trails (secondary copies), shadow trails\n can't be modified directly, the origin trail 
needs to be modified.\n\n Shadow trails are created for multi-region trails as well for\n organizational trails.\n \"\"\"\n schema = type_schema('is-shadow', state={'type': 'boolean'})\n permissions = ('cloudtrail:DescribeTrails',)\n embedded = False\n\n def process(self, resources, event=None):\n rcount = len(resources)\n trails = [t for t in resources if (self.is_shadow(t) == self.data.get('state', True))]\n if len(trails) != rcount and self.embedded:\n self.log.info(\"implicitly filtering shadow trails %d -> %d\",\n rcount, len(trails))\n return trails\n\n def is_shadow(self, t):\n if t.get('IsOrganizationTrail') and self.manager.config.account_id not in t['TrailARN']:\n return True\n if t.get('IsMultiRegionTrail') and t['HomeRegion'] != self.manager.config.region:\n return True\n return False\n\n\[email protected]_registry.register('status')\nclass Status(ValueFilter):\n \"\"\"Filter a cloudtrail by its status.\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-check-status\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n \"\"\"\n\n schema = type_schema('status', rinherit=ValueFilter.schema)\n schema_alias = False\n permissions = ('cloudtrail:GetTrailStatus',)\n annotation_key = 'c7n:TrailStatus'\n\n def process(self, resources, event=None):\n\n non_account_trails = set()\n\n for r in resources:\n region = self.manager.config.region\n trail_arn = Arn.parse(r['TrailARN'])\n\n if (r.get('IsOrganizationTrail') and\n self.manager.config.account_id != trail_arn.account_id):\n non_account_trails.add(r['TrailARN'])\n continue\n if r.get('HomeRegion') and r['HomeRegion'] != region:\n region = trail_arn.region\n if self.annotation_key in r:\n continue\n client = local_session(self.manager.session_factory).client(\n 'cloudtrail', region_name=region)\n status = client.get_trail_status(Name=r['Name'])\n status.pop('ResponseMetadata')\n r[self.annotation_key] = status\n\n if non_account_trails:\n self.log.warning(\n 'found %d org cloud trail from different account that cant be processed',\n len(non_account_trails))\n return super(Status, self).process([\n r for r in resources if r['TrailARN'] not in non_account_trails])\n\n def __call__(self, r):\n return self.match(r['c7n:TrailStatus'])\n\n\[email protected]_registry.register('update-trail')\nclass UpdateTrail(Action):\n \"\"\"Update trail attributes.\n\n :Example:\n\n .. 
code-block:: yaml\n\n policies:\n - name: cloudtrail-set-log\n resource: aws.cloudtrail\n filters:\n - or:\n - KmsKeyId: empty\n - LogFileValidationEnabled: false\n actions:\n - type: update-trail\n attributes:\n KmsKeyId: arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef\n EnableLogFileValidation: true\n \"\"\"\n schema = type_schema(\n 'update-trail',\n attributes={'type': 'object'},\n required=('attributes',))\n shape = 'UpdateTrailRequest'\n permissions = ('cloudtrail:UpdateTrail',)\n\n def validate(self):\n attrs = dict(self.data['attributes'])\n if 'Name' in attrs:\n raise PolicyValidationError(\n \"Can't include Name in update-trail action\")\n attrs['Name'] = 'PolicyValidation'\n return shape_validate(\n attrs,\n self.shape,\n self.manager.resource_type.service)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n\n for r in resources:\n client.update_trail(\n Name=r['Name'],\n **self.data['attributes'])\n\n\[email protected]_registry.register('set-logging')\nclass SetLogging(Action):\n \"\"\"Set the logging state of a trail\n\n :Example:\n\n .. code-block:: yaml\n\n policies:\n - name: cloudtrail-set-active\n resource: aws.cloudtrail\n filters:\n - type: status\n key: IsLogging\n value: False\n actions:\n - type: set-logging\n enabled: True\n \"\"\"\n schema = type_schema(\n 'set-logging', enabled={'type': 'boolean'})\n\n def get_permissions(self):\n enable = self.data.get('enabled', True)\n if enable is True:\n return ('cloudtrail:StartLogging',)\n else:\n return ('cloudtrail:StopLogging',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n enable = self.data.get('enabled', True)\n\n for r in resources:\n if enable:\n client.start_logging(Name=r['Name'])\n else:\n client.stop_logging(Name=r['Name'])\n\n\[email protected]_registry.register('delete')\nclass DeleteTrail(BaseAction):\n \"\"\" Delete a cloud trail\n\n :example:\n\n .. code-block:: yaml\n\n policies:\n - name: delete-cloudtrail\n resource: aws.cloudtrail\n filters:\n - type: value\n key: Name\n value: delete-me\n op: eq\n actions:\n - type: delete\n \"\"\"\n\n schema = type_schema('delete')\n permissions = ('cloudtrail:DeleteTrail',)\n\n def process(self, resources):\n client = local_session(self.manager.session_factory).client('cloudtrail')\n shadow_check = IsShadow({'state': False}, self.manager)\n shadow_check.embedded = True\n resources = shadow_check.process(resources)\n for r in resources:\n try:\n client.delete_trail(Name=r['Name'])\n except client.exceptions.TrailNotFoundException:\n continue\n", "path": "c7n/resources/cloudtrail.py"}]} | 3,915 | 316 |
gh_patches_debug_38232 | rasdani/github-patches | git_diff | freqtrade__freqtrade-2217 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Plot-scripts require --datadir
## Describe your environment
* Python Version: 3.7
* Branch: Develop
* Last Commit ID: 962d487edb0d28f95d6395c09189a333c436fd20
## Describe the problem:
Currently, `freqtrade plot-dataframe` requires either a valid configuration (`--config` or `config.json` in cwd) or `--datadir user_data/data/bittrex` to find the backtest data.
This is because without one of these, the exchange is not known, which is a requirement to find the data in the datadir.
## Possible fixes
* Error out and point out that one of the 2 conditions has to be met
* Add an `--exchange` parameter as an alternative (including the above)
* ... other ideas?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `freqtrade/configuration/arguments.py`
Content:
```
1 """
2 This module contains the argument manager class
3 """
4 import argparse
5 from typing import List, Optional
6
7 from freqtrade.configuration.cli_options import AVAILABLE_CLI_OPTIONS
8 from freqtrade import constants
9
10 ARGS_COMMON = ["verbosity", "logfile", "version", "config", "datadir", "user_data_dir"]
11
12 ARGS_STRATEGY = ["strategy", "strategy_path"]
13
14 ARGS_MAIN = ARGS_COMMON + ARGS_STRATEGY + ["db_url", "sd_notify"]
15
16 ARGS_COMMON_OPTIMIZE = ["ticker_interval", "timerange",
17 "max_open_trades", "stake_amount", "refresh_pairs"]
18
19 ARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + ["position_stacking", "use_max_market_positions",
20 "strategy_list", "export", "exportfilename"]
21
22 ARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + ["hyperopt", "hyperopt_path",
23 "position_stacking", "epochs", "spaces",
24 "use_max_market_positions", "print_all",
25 "print_colorized", "print_json", "hyperopt_jobs",
26 "hyperopt_random_state", "hyperopt_min_trades",
27 "hyperopt_continue", "hyperopt_loss"]
28
29 ARGS_EDGE = ARGS_COMMON_OPTIMIZE + ["stoploss_range"]
30
31 ARGS_LIST_EXCHANGES = ["print_one_column"]
32
33 ARGS_CREATE_USERDIR = ["user_data_dir"]
34
35 ARGS_DOWNLOAD_DATA = ["pairs", "pairs_file", "days", "exchange", "timeframes", "erase"]
36
37 ARGS_PLOT_DATAFRAME = ["pairs", "indicators1", "indicators2", "plot_limit", "db_url",
38 "trade_source", "export", "exportfilename", "timerange", "ticker_interval"]
39
40 ARGS_PLOT_PROFIT = ["pairs", "timerange", "export", "exportfilename", "db_url",
41 "trade_source", "ticker_interval"]
42
43 NO_CONF_REQURIED = ["download-data", "plot-dataframe", "plot-profit"]
44
45
46 class Arguments(object):
47 """
48 Arguments Class. Manage the arguments received by the cli
49 """
50 def __init__(self, args: Optional[List[str]]) -> None:
51 self.args = args
52 self._parsed_arg: Optional[argparse.Namespace] = None
53 self.parser = argparse.ArgumentParser(description='Free, open source crypto trading bot')
54
55 def _load_args(self) -> None:
56 self._build_args(optionlist=ARGS_MAIN)
57 self._build_subcommands()
58
59 def get_parsed_arg(self) -> argparse.Namespace:
60 """
61 Return the list of arguments
62 :return: List[str] List of arguments
63 """
64 if self._parsed_arg is None:
65 self._load_args()
66 self._parsed_arg = self._parse_args()
67
68 return self._parsed_arg
69
70 def _parse_args(self) -> argparse.Namespace:
71 """
72 Parses given arguments and returns an argparse Namespace instance.
73 """
74 parsed_arg = self.parser.parse_args(self.args)
75
76 # Workaround issue in argparse with action='append' and default value
77 # (see https://bugs.python.org/issue16399)
78 # Allow no-config for certain commands (like downloading / plotting)
79 if (parsed_arg.config is None
80 and not ('subparser' in parsed_arg and parsed_arg.subparser in NO_CONF_REQURIED)):
81 parsed_arg.config = [constants.DEFAULT_CONFIG]
82
83 return parsed_arg
84
85 def _build_args(self, optionlist, parser=None):
86 parser = parser or self.parser
87
88 for val in optionlist:
89 opt = AVAILABLE_CLI_OPTIONS[val]
90 parser.add_argument(*opt.cli, dest=val, **opt.kwargs)
91
92 def _build_subcommands(self) -> None:
93 """
94 Builds and attaches all subcommands.
95 :return: None
96 """
97 from freqtrade.optimize import start_backtesting, start_hyperopt, start_edge
98 from freqtrade.utils import start_create_userdir, start_download_data, start_list_exchanges
99
100 subparsers = self.parser.add_subparsers(dest='subparser')
101
102 # Add backtesting subcommand
103 backtesting_cmd = subparsers.add_parser('backtesting', help='Backtesting module.')
104 backtesting_cmd.set_defaults(func=start_backtesting)
105 self._build_args(optionlist=ARGS_BACKTEST, parser=backtesting_cmd)
106
107 # Add edge subcommand
108 edge_cmd = subparsers.add_parser('edge', help='Edge module.')
109 edge_cmd.set_defaults(func=start_edge)
110 self._build_args(optionlist=ARGS_EDGE, parser=edge_cmd)
111
112 # Add hyperopt subcommand
113 hyperopt_cmd = subparsers.add_parser('hyperopt', help='Hyperopt module.')
114 hyperopt_cmd.set_defaults(func=start_hyperopt)
115 self._build_args(optionlist=ARGS_HYPEROPT, parser=hyperopt_cmd)
116
117 # add create-userdir subcommand
118 create_userdir_cmd = subparsers.add_parser('create-userdir',
119 help="Create user-data directory.")
120 create_userdir_cmd.set_defaults(func=start_create_userdir)
121 self._build_args(optionlist=ARGS_CREATE_USERDIR, parser=create_userdir_cmd)
122
123 # Add list-exchanges subcommand
124 list_exchanges_cmd = subparsers.add_parser(
125 'list-exchanges',
126 help='Print available exchanges.'
127 )
128 list_exchanges_cmd.set_defaults(func=start_list_exchanges)
129 self._build_args(optionlist=ARGS_LIST_EXCHANGES, parser=list_exchanges_cmd)
130
131 # Add download-data subcommand
132 download_data_cmd = subparsers.add_parser(
133 'download-data',
134 help='Download backtesting data.'
135 )
136 download_data_cmd.set_defaults(func=start_download_data)
137 self._build_args(optionlist=ARGS_DOWNLOAD_DATA, parser=download_data_cmd)
138
139 # Add Plotting subcommand
140 from freqtrade.plot.plot_utils import start_plot_dataframe, start_plot_profit
141 plot_dataframe_cmd = subparsers.add_parser(
142 'plot-dataframe',
143 help='Plot candles with indicators.'
144 )
145 plot_dataframe_cmd.set_defaults(func=start_plot_dataframe)
146 self._build_args(optionlist=ARGS_PLOT_DATAFRAME, parser=plot_dataframe_cmd)
147
148 # Plot profit
149 plot_profit_cmd = subparsers.add_parser(
150 'plot-profit',
151 help='Generate plot showing profits.'
152 )
153 plot_profit_cmd.set_defaults(func=start_plot_profit)
154 self._build_args(optionlist=ARGS_PLOT_PROFIT, parser=plot_profit_cmd)
155
```
Path: `freqtrade/plot/plot_utils.py`
Content:
```
1 from argparse import Namespace
2
3 from freqtrade.state import RunMode
4 from freqtrade.utils import setup_utils_configuration
5
6
7 def start_plot_dataframe(args: Namespace) -> None:
8 """
9 Entrypoint for dataframe plotting
10 """
11 # Import here to avoid errors if plot-dependencies are not installed.
12 from freqtrade.plot.plotting import analyse_and_plot_pairs
13 config = setup_utils_configuration(args, RunMode.PLOT)
14
15 analyse_and_plot_pairs(config)
16
17
18 def start_plot_profit(args: Namespace) -> None:
19 """
20 Entrypoint for plot_profit
21 """
22 # Import here to avoid errors if plot-dependencies are not installed.
23 from freqtrade.plot.plotting import plot_profit
24 config = setup_utils_configuration(args, RunMode.PLOT)
25
26 plot_profit(config)
27
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/freqtrade/configuration/arguments.py b/freqtrade/configuration/arguments.py
--- a/freqtrade/configuration/arguments.py
+++ b/freqtrade/configuration/arguments.py
@@ -3,6 +3,7 @@
"""
import argparse
from typing import List, Optional
+from pathlib import Path
from freqtrade.configuration.cli_options import AVAILABLE_CLI_OPTIONS
from freqtrade import constants
@@ -73,11 +74,13 @@
"""
parsed_arg = self.parser.parse_args(self.args)
+ # When no config is provided, but a config exists, use that configuration!
+
# Workaround issue in argparse with action='append' and default value
# (see https://bugs.python.org/issue16399)
# Allow no-config for certain commands (like downloading / plotting)
- if (parsed_arg.config is None
- and not ('subparser' in parsed_arg and parsed_arg.subparser in NO_CONF_REQURIED)):
+ if (parsed_arg.config is None and ((Path.cwd() / constants.DEFAULT_CONFIG).is_file() or
+ not ('subparser' in parsed_arg and parsed_arg.subparser in NO_CONF_REQURIED))):
parsed_arg.config = [constants.DEFAULT_CONFIG]
return parsed_arg
diff --git a/freqtrade/plot/plot_utils.py b/freqtrade/plot/plot_utils.py
--- a/freqtrade/plot/plot_utils.py
+++ b/freqtrade/plot/plot_utils.py
@@ -1,15 +1,24 @@
from argparse import Namespace
-
+from freqtrade import OperationalException
from freqtrade.state import RunMode
from freqtrade.utils import setup_utils_configuration
+def validate_plot_args(args: Namespace):
+ args_tmp = vars(args)
+ if not args_tmp.get('datadir') and not args_tmp.get('config'):
+ raise OperationalException(
+ "You need to specify either `--datadir` or `--config` "
+ "for plot-profit and plot-dataframe.")
+
+
def start_plot_dataframe(args: Namespace) -> None:
"""
Entrypoint for dataframe plotting
"""
# Import here to avoid errors if plot-dependencies are not installed.
from freqtrade.plot.plotting import analyse_and_plot_pairs
+ validate_plot_args(args)
config = setup_utils_configuration(args, RunMode.PLOT)
analyse_and_plot_pairs(config)
@@ -21,6 +30,7 @@
"""
# Import here to avoid errors if plot-dependencies are not installed.
from freqtrade.plot.plotting import plot_profit
+ validate_plot_args(args)
config = setup_utils_configuration(args, RunMode.PLOT)
plot_profit(config)
| {"golden_diff": "diff --git a/freqtrade/configuration/arguments.py b/freqtrade/configuration/arguments.py\n--- a/freqtrade/configuration/arguments.py\n+++ b/freqtrade/configuration/arguments.py\n@@ -3,6 +3,7 @@\n \"\"\"\n import argparse\n from typing import List, Optional\n+from pathlib import Path\n \n from freqtrade.configuration.cli_options import AVAILABLE_CLI_OPTIONS\n from freqtrade import constants\n@@ -73,11 +74,13 @@\n \"\"\"\n parsed_arg = self.parser.parse_args(self.args)\n \n+ # When no config is provided, but a config exists, use that configuration!\n+\n # Workaround issue in argparse with action='append' and default value\n # (see https://bugs.python.org/issue16399)\n # Allow no-config for certain commands (like downloading / plotting)\n- if (parsed_arg.config is None\n- and not ('subparser' in parsed_arg and parsed_arg.subparser in NO_CONF_REQURIED)):\n+ if (parsed_arg.config is None and ((Path.cwd() / constants.DEFAULT_CONFIG).is_file() or\n+ not ('subparser' in parsed_arg and parsed_arg.subparser in NO_CONF_REQURIED))):\n parsed_arg.config = [constants.DEFAULT_CONFIG]\n \n return parsed_arg\ndiff --git a/freqtrade/plot/plot_utils.py b/freqtrade/plot/plot_utils.py\n--- a/freqtrade/plot/plot_utils.py\n+++ b/freqtrade/plot/plot_utils.py\n@@ -1,15 +1,24 @@\n from argparse import Namespace\n-\n+from freqtrade import OperationalException\n from freqtrade.state import RunMode\n from freqtrade.utils import setup_utils_configuration\n \n \n+def validate_plot_args(args: Namespace):\n+ args_tmp = vars(args)\n+ if not args_tmp.get('datadir') and not args_tmp.get('config'):\n+ raise OperationalException(\n+ \"You need to specify either `--datadir` or `--config` \"\n+ \"for plot-profit and plot-dataframe.\")\n+\n+\n def start_plot_dataframe(args: Namespace) -> None:\n \"\"\"\n Entrypoint for dataframe plotting\n \"\"\"\n # Import here to avoid errors if plot-dependencies are not installed.\n from freqtrade.plot.plotting import analyse_and_plot_pairs\n+ validate_plot_args(args)\n config = setup_utils_configuration(args, RunMode.PLOT)\n \n analyse_and_plot_pairs(config)\n@@ -21,6 +30,7 @@\n \"\"\"\n # Import here to avoid errors if plot-dependencies are not installed.\n from freqtrade.plot.plotting import plot_profit\n+ validate_plot_args(args)\n config = setup_utils_configuration(args, RunMode.PLOT)\n \n plot_profit(config)\n", "issue": "Plot-scripts require --datadir\n## Describe your environment\r\n\r\n * Python Version: 3.7\r\n * Branch: Develop\r\n * Last Commit ID: 962d487edb0d28f95d6395c09189a333c436fd20\r\n \r\n## Describe the problem:\r\n\r\nCurrently, `freqtrade plot-dataframe` does require either a valid configuration (`--config` or `config.json` in cwd - or `--datadir user_data/data/bittrex` to find the backtest data.\r\nThis is because without one of these, the exchange is not known, which is a requirement to find the data in the datadir.\r\n\r\n## Possible fixes\r\n\r\n* Error and point out that one of the 2 conditions have to be met\r\n* add `--exchange` parameter as alternative (including the above)\r\n\r\n* ... 
other ideas?\n", "before_files": [{"content": "\"\"\"\nThis module contains the argument manager class\n\"\"\"\nimport argparse\nfrom typing import List, Optional\n\nfrom freqtrade.configuration.cli_options import AVAILABLE_CLI_OPTIONS\nfrom freqtrade import constants\n\nARGS_COMMON = [\"verbosity\", \"logfile\", \"version\", \"config\", \"datadir\", \"user_data_dir\"]\n\nARGS_STRATEGY = [\"strategy\", \"strategy_path\"]\n\nARGS_MAIN = ARGS_COMMON + ARGS_STRATEGY + [\"db_url\", \"sd_notify\"]\n\nARGS_COMMON_OPTIMIZE = [\"ticker_interval\", \"timerange\",\n \"max_open_trades\", \"stake_amount\", \"refresh_pairs\"]\n\nARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + [\"position_stacking\", \"use_max_market_positions\",\n \"strategy_list\", \"export\", \"exportfilename\"]\n\nARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + [\"hyperopt\", \"hyperopt_path\",\n \"position_stacking\", \"epochs\", \"spaces\",\n \"use_max_market_positions\", \"print_all\",\n \"print_colorized\", \"print_json\", \"hyperopt_jobs\",\n \"hyperopt_random_state\", \"hyperopt_min_trades\",\n \"hyperopt_continue\", \"hyperopt_loss\"]\n\nARGS_EDGE = ARGS_COMMON_OPTIMIZE + [\"stoploss_range\"]\n\nARGS_LIST_EXCHANGES = [\"print_one_column\"]\n\nARGS_CREATE_USERDIR = [\"user_data_dir\"]\n\nARGS_DOWNLOAD_DATA = [\"pairs\", \"pairs_file\", \"days\", \"exchange\", \"timeframes\", \"erase\"]\n\nARGS_PLOT_DATAFRAME = [\"pairs\", \"indicators1\", \"indicators2\", \"plot_limit\", \"db_url\",\n \"trade_source\", \"export\", \"exportfilename\", \"timerange\", \"ticker_interval\"]\n\nARGS_PLOT_PROFIT = [\"pairs\", \"timerange\", \"export\", \"exportfilename\", \"db_url\",\n \"trade_source\", \"ticker_interval\"]\n\nNO_CONF_REQURIED = [\"download-data\", \"plot-dataframe\", \"plot-profit\"]\n\n\nclass Arguments(object):\n \"\"\"\n Arguments Class. 
Manage the arguments received by the cli\n \"\"\"\n def __init__(self, args: Optional[List[str]]) -> None:\n self.args = args\n self._parsed_arg: Optional[argparse.Namespace] = None\n self.parser = argparse.ArgumentParser(description='Free, open source crypto trading bot')\n\n def _load_args(self) -> None:\n self._build_args(optionlist=ARGS_MAIN)\n self._build_subcommands()\n\n def get_parsed_arg(self) -> argparse.Namespace:\n \"\"\"\n Return the list of arguments\n :return: List[str] List of arguments\n \"\"\"\n if self._parsed_arg is None:\n self._load_args()\n self._parsed_arg = self._parse_args()\n\n return self._parsed_arg\n\n def _parse_args(self) -> argparse.Namespace:\n \"\"\"\n Parses given arguments and returns an argparse Namespace instance.\n \"\"\"\n parsed_arg = self.parser.parse_args(self.args)\n\n # Workaround issue in argparse with action='append' and default value\n # (see https://bugs.python.org/issue16399)\n # Allow no-config for certain commands (like downloading / plotting)\n if (parsed_arg.config is None\n and not ('subparser' in parsed_arg and parsed_arg.subparser in NO_CONF_REQURIED)):\n parsed_arg.config = [constants.DEFAULT_CONFIG]\n\n return parsed_arg\n\n def _build_args(self, optionlist, parser=None):\n parser = parser or self.parser\n\n for val in optionlist:\n opt = AVAILABLE_CLI_OPTIONS[val]\n parser.add_argument(*opt.cli, dest=val, **opt.kwargs)\n\n def _build_subcommands(self) -> None:\n \"\"\"\n Builds and attaches all subcommands.\n :return: None\n \"\"\"\n from freqtrade.optimize import start_backtesting, start_hyperopt, start_edge\n from freqtrade.utils import start_create_userdir, start_download_data, start_list_exchanges\n\n subparsers = self.parser.add_subparsers(dest='subparser')\n\n # Add backtesting subcommand\n backtesting_cmd = subparsers.add_parser('backtesting', help='Backtesting module.')\n backtesting_cmd.set_defaults(func=start_backtesting)\n self._build_args(optionlist=ARGS_BACKTEST, parser=backtesting_cmd)\n\n # Add edge subcommand\n edge_cmd = subparsers.add_parser('edge', help='Edge module.')\n edge_cmd.set_defaults(func=start_edge)\n self._build_args(optionlist=ARGS_EDGE, parser=edge_cmd)\n\n # Add hyperopt subcommand\n hyperopt_cmd = subparsers.add_parser('hyperopt', help='Hyperopt module.')\n hyperopt_cmd.set_defaults(func=start_hyperopt)\n self._build_args(optionlist=ARGS_HYPEROPT, parser=hyperopt_cmd)\n\n # add create-userdir subcommand\n create_userdir_cmd = subparsers.add_parser('create-userdir',\n help=\"Create user-data directory.\")\n create_userdir_cmd.set_defaults(func=start_create_userdir)\n self._build_args(optionlist=ARGS_CREATE_USERDIR, parser=create_userdir_cmd)\n\n # Add list-exchanges subcommand\n list_exchanges_cmd = subparsers.add_parser(\n 'list-exchanges',\n help='Print available exchanges.'\n )\n list_exchanges_cmd.set_defaults(func=start_list_exchanges)\n self._build_args(optionlist=ARGS_LIST_EXCHANGES, parser=list_exchanges_cmd)\n\n # Add download-data subcommand\n download_data_cmd = subparsers.add_parser(\n 'download-data',\n help='Download backtesting data.'\n )\n download_data_cmd.set_defaults(func=start_download_data)\n self._build_args(optionlist=ARGS_DOWNLOAD_DATA, parser=download_data_cmd)\n\n # Add Plotting subcommand\n from freqtrade.plot.plot_utils import start_plot_dataframe, start_plot_profit\n plot_dataframe_cmd = subparsers.add_parser(\n 'plot-dataframe',\n help='Plot candles with indicators.'\n )\n plot_dataframe_cmd.set_defaults(func=start_plot_dataframe)\n 
self._build_args(optionlist=ARGS_PLOT_DATAFRAME, parser=plot_dataframe_cmd)\n\n # Plot profit\n plot_profit_cmd = subparsers.add_parser(\n 'plot-profit',\n help='Generate plot showing profits.'\n )\n plot_profit_cmd.set_defaults(func=start_plot_profit)\n self._build_args(optionlist=ARGS_PLOT_PROFIT, parser=plot_profit_cmd)\n", "path": "freqtrade/configuration/arguments.py"}, {"content": "from argparse import Namespace\n\nfrom freqtrade.state import RunMode\nfrom freqtrade.utils import setup_utils_configuration\n\n\ndef start_plot_dataframe(args: Namespace) -> None:\n \"\"\"\n Entrypoint for dataframe plotting\n \"\"\"\n # Import here to avoid errors if plot-dependencies are not installed.\n from freqtrade.plot.plotting import analyse_and_plot_pairs\n config = setup_utils_configuration(args, RunMode.PLOT)\n\n analyse_and_plot_pairs(config)\n\n\ndef start_plot_profit(args: Namespace) -> None:\n \"\"\"\n Entrypoint for plot_profit\n \"\"\"\n # Import here to avoid errors if plot-dependencies are not installed.\n from freqtrade.plot.plotting import plot_profit\n config = setup_utils_configuration(args, RunMode.PLOT)\n\n plot_profit(config)\n", "path": "freqtrade/plot/plot_utils.py"}], "after_files": [{"content": "\"\"\"\nThis module contains the argument manager class\n\"\"\"\nimport argparse\nfrom typing import List, Optional\nfrom pathlib import Path\n\nfrom freqtrade.configuration.cli_options import AVAILABLE_CLI_OPTIONS\nfrom freqtrade import constants\n\nARGS_COMMON = [\"verbosity\", \"logfile\", \"version\", \"config\", \"datadir\", \"user_data_dir\"]\n\nARGS_STRATEGY = [\"strategy\", \"strategy_path\"]\n\nARGS_MAIN = ARGS_COMMON + ARGS_STRATEGY + [\"db_url\", \"sd_notify\"]\n\nARGS_COMMON_OPTIMIZE = [\"ticker_interval\", \"timerange\",\n \"max_open_trades\", \"stake_amount\", \"refresh_pairs\"]\n\nARGS_BACKTEST = ARGS_COMMON_OPTIMIZE + [\"position_stacking\", \"use_max_market_positions\",\n \"strategy_list\", \"export\", \"exportfilename\"]\n\nARGS_HYPEROPT = ARGS_COMMON_OPTIMIZE + [\"hyperopt\", \"hyperopt_path\",\n \"position_stacking\", \"epochs\", \"spaces\",\n \"use_max_market_positions\", \"print_all\",\n \"print_colorized\", \"print_json\", \"hyperopt_jobs\",\n \"hyperopt_random_state\", \"hyperopt_min_trades\",\n \"hyperopt_continue\", \"hyperopt_loss\"]\n\nARGS_EDGE = ARGS_COMMON_OPTIMIZE + [\"stoploss_range\"]\n\nARGS_LIST_EXCHANGES = [\"print_one_column\"]\n\nARGS_CREATE_USERDIR = [\"user_data_dir\"]\n\nARGS_DOWNLOAD_DATA = [\"pairs\", \"pairs_file\", \"days\", \"exchange\", \"timeframes\", \"erase\"]\n\nARGS_PLOT_DATAFRAME = [\"pairs\", \"indicators1\", \"indicators2\", \"plot_limit\", \"db_url\",\n \"trade_source\", \"export\", \"exportfilename\", \"timerange\", \"ticker_interval\"]\n\nARGS_PLOT_PROFIT = [\"pairs\", \"timerange\", \"export\", \"exportfilename\", \"db_url\",\n \"trade_source\", \"ticker_interval\"]\n\nNO_CONF_REQURIED = [\"download-data\", \"plot-dataframe\", \"plot-profit\"]\n\n\nclass Arguments(object):\n \"\"\"\n Arguments Class. 
Manage the arguments received by the cli\n \"\"\"\n def __init__(self, args: Optional[List[str]]) -> None:\n self.args = args\n self._parsed_arg: Optional[argparse.Namespace] = None\n self.parser = argparse.ArgumentParser(description='Free, open source crypto trading bot')\n\n def _load_args(self) -> None:\n self._build_args(optionlist=ARGS_MAIN)\n self._build_subcommands()\n\n def get_parsed_arg(self) -> argparse.Namespace:\n \"\"\"\n Return the list of arguments\n :return: List[str] List of arguments\n \"\"\"\n if self._parsed_arg is None:\n self._load_args()\n self._parsed_arg = self._parse_args()\n\n return self._parsed_arg\n\n def _parse_args(self) -> argparse.Namespace:\n \"\"\"\n Parses given arguments and returns an argparse Namespace instance.\n \"\"\"\n parsed_arg = self.parser.parse_args(self.args)\n\n # When no config is provided, but a config exists, use that configuration!\n\n # Workaround issue in argparse with action='append' and default value\n # (see https://bugs.python.org/issue16399)\n # Allow no-config for certain commands (like downloading / plotting)\n if (parsed_arg.config is None and ((Path.cwd() / constants.DEFAULT_CONFIG).is_file() or\n not ('subparser' in parsed_arg and parsed_arg.subparser in NO_CONF_REQURIED))):\n parsed_arg.config = [constants.DEFAULT_CONFIG]\n\n return parsed_arg\n\n def _build_args(self, optionlist, parser=None):\n parser = parser or self.parser\n\n for val in optionlist:\n opt = AVAILABLE_CLI_OPTIONS[val]\n parser.add_argument(*opt.cli, dest=val, **opt.kwargs)\n\n def _build_subcommands(self) -> None:\n \"\"\"\n Builds and attaches all subcommands.\n :return: None\n \"\"\"\n from freqtrade.optimize import start_backtesting, start_hyperopt, start_edge\n from freqtrade.utils import start_create_userdir, start_download_data, start_list_exchanges\n\n subparsers = self.parser.add_subparsers(dest='subparser')\n\n # Add backtesting subcommand\n backtesting_cmd = subparsers.add_parser('backtesting', help='Backtesting module.')\n backtesting_cmd.set_defaults(func=start_backtesting)\n self._build_args(optionlist=ARGS_BACKTEST, parser=backtesting_cmd)\n\n # Add edge subcommand\n edge_cmd = subparsers.add_parser('edge', help='Edge module.')\n edge_cmd.set_defaults(func=start_edge)\n self._build_args(optionlist=ARGS_EDGE, parser=edge_cmd)\n\n # Add hyperopt subcommand\n hyperopt_cmd = subparsers.add_parser('hyperopt', help='Hyperopt module.')\n hyperopt_cmd.set_defaults(func=start_hyperopt)\n self._build_args(optionlist=ARGS_HYPEROPT, parser=hyperopt_cmd)\n\n # add create-userdir subcommand\n create_userdir_cmd = subparsers.add_parser('create-userdir',\n help=\"Create user-data directory.\")\n create_userdir_cmd.set_defaults(func=start_create_userdir)\n self._build_args(optionlist=ARGS_CREATE_USERDIR, parser=create_userdir_cmd)\n\n # Add list-exchanges subcommand\n list_exchanges_cmd = subparsers.add_parser(\n 'list-exchanges',\n help='Print available exchanges.'\n )\n list_exchanges_cmd.set_defaults(func=start_list_exchanges)\n self._build_args(optionlist=ARGS_LIST_EXCHANGES, parser=list_exchanges_cmd)\n\n # Add download-data subcommand\n download_data_cmd = subparsers.add_parser(\n 'download-data',\n help='Download backtesting data.'\n )\n download_data_cmd.set_defaults(func=start_download_data)\n self._build_args(optionlist=ARGS_DOWNLOAD_DATA, parser=download_data_cmd)\n\n # Add Plotting subcommand\n from freqtrade.plot.plot_utils import start_plot_dataframe, start_plot_profit\n plot_dataframe_cmd = subparsers.add_parser(\n 'plot-dataframe',\n 
help='Plot candles with indicators.'\n )\n plot_dataframe_cmd.set_defaults(func=start_plot_dataframe)\n self._build_args(optionlist=ARGS_PLOT_DATAFRAME, parser=plot_dataframe_cmd)\n\n # Plot profit\n plot_profit_cmd = subparsers.add_parser(\n 'plot-profit',\n help='Generate plot showing profits.'\n )\n plot_profit_cmd.set_defaults(func=start_plot_profit)\n self._build_args(optionlist=ARGS_PLOT_PROFIT, parser=plot_profit_cmd)\n", "path": "freqtrade/configuration/arguments.py"}, {"content": "from argparse import Namespace\nfrom freqtrade import OperationalException\nfrom freqtrade.state import RunMode\nfrom freqtrade.utils import setup_utils_configuration\n\n\ndef validate_plot_args(args: Namespace):\n args_tmp = vars(args)\n if not args_tmp.get('datadir') and not args_tmp.get('config'):\n raise OperationalException(\n \"You need to specify either `--datadir` or `--config` \"\n \"for plot-profit and plot-dataframe.\")\n\n\ndef start_plot_dataframe(args: Namespace) -> None:\n \"\"\"\n Entrypoint for dataframe plotting\n \"\"\"\n # Import here to avoid errors if plot-dependencies are not installed.\n from freqtrade.plot.plotting import analyse_and_plot_pairs\n validate_plot_args(args)\n config = setup_utils_configuration(args, RunMode.PLOT)\n\n analyse_and_plot_pairs(config)\n\n\ndef start_plot_profit(args: Namespace) -> None:\n \"\"\"\n Entrypoint for plot_profit\n \"\"\"\n # Import here to avoid errors if plot-dependencies are not installed.\n from freqtrade.plot.plotting import plot_profit\n validate_plot_args(args)\n config = setup_utils_configuration(args, RunMode.PLOT)\n\n plot_profit(config)\n", "path": "freqtrade/plot/plot_utils.py"}]} | 2,389 | 584 |
gh_patches_debug_27850 | rasdani/github-patches | git_diff | streamlit__streamlit-5021 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
st.radio with DataFrame fails on rerun
### Summary
When you pass a DataFrame as the options in an st.radio, every rerun throws an error (but the first run works).
### Steps to reproduce
Code snippet:
```
import streamlit as st
import pandas as pd
df = pd.DataFrame({'foo': ['one', 'two']})
st.radio('Foo', df)
```
1. Run the code above.
2. Press "R" to rerun the code above.
**Expected behavior:**
The rerun works, just like the first run.
**Actual behavior:**
The app hangs (stays in running state forever) and shows the error below in the terminal:
```
Exception in thread ScriptRunner.scriptThread:
Traceback (most recent call last):
File "/usr/local/Cellar/[email protected]/3.9.5/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py", line 954, in _bootstrap_inner
self.run()
File "/usr/local/Cellar/[email protected]/3.9.5/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py", line 892, in run
self._target(*self._args, **self._kwargs)
File "/Users/[HIDDEN]/.venv/lib/python3.9/site-packages/streamlit/script_runner.py", line 210, in _process_request_queue
widget_states = self._session_state.as_widget_states()
File "/Users/[HIDDEN]/.venv/lib/python3.9/site-packages/streamlit/state/session_state.py", line 560, in as_widget_states
return self._new_widget_state.as_widget_states()
File "/Users/[HIDDEN]/.venv/lib/python3.9/site-packages/streamlit/state/session_state.py", line 211, in as_widget_states
states = [
File "/Users/[HIDDEN]/.venv/lib/python3.9/site-packages/streamlit/state/session_state.py", line 214, in <listcomp>
if self.get_serialized(widget_id)
File "/Users/[HIDDEN]/.venv/lib/python3.9/site-packages/streamlit/state/session_state.py", line 190, in get_serialized
serialized = metadata.serializer(item.value)
File "/Users/[HIDDEN]/.venv/lib/python3.9/site-packages/streamlit/elements/radio.py", line 136, in serialize_radio
return index_(options, v)
File "/Users/[HIDDEN]/.venv/lib/python3.9/site-packages/streamlit/util.py", line 129, in index_
raise ValueError("{} is not in iterable".format(str(x)))
ValueError: one is not in iterable
```
### Is this a regression?
yes
Previous known working version = 0.84.0
### Debug info
- Streamlit version: 1.4.0
- Python version: 3.9.5
### Additional information
A meta-bug related to this: I'm not sure why this error is thrown in the terminal rather than inside the Streamlit app. Previously, our goal was to have _every_ error appear in the app, so you never had to check the terminal. It would be great to see if some code change unexpectedly changed this behavior.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `e2e/scripts/st_radio.py`
Content:
```
1 # Copyright 2018-2022 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import streamlit as st
16
17 options = ("female", "male")
18 i1 = st.radio("radio 1", options, 1)
19 st.write("value 1:", i1)
20
21 i2 = st.radio("radio 2", options, 0, format_func=lambda x: x.capitalize())
22 st.write("value 2:", i2)
23
24 i3 = st.radio("radio 3", [])
25 st.write("value 3:", i3)
26
27 i4 = st.radio("radio 4", options, disabled=True)
28 st.write("value 4:", i4)
29
30 i5 = st.radio("radio 5", options, horizontal=True)
31 st.write("value 5:", i5)
32
33 if st._is_running_with_streamlit:
34
35 def on_change():
36 st.session_state.radio_changed = True
37
38 st.radio("radio 6", options, 1, key="radio6", on_change=on_change)
39 st.write("value 6:", st.session_state.radio6)
40 st.write("radio changed:", "radio_changed" in st.session_state)
41
```
Path: `lib/streamlit/elements/radio.py`
Content:
```
1 # Copyright 2018-2022 Streamlit Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from textwrap import dedent
16 from typing import Any, Callable, Optional, cast
17
18 import streamlit
19 from streamlit.errors import StreamlitAPIException
20 from streamlit.proto.Radio_pb2 import Radio as RadioProto
21 from streamlit.scriptrunner import ScriptRunContext, get_script_run_ctx
22 from streamlit.state import (
23 register_widget,
24 WidgetArgs,
25 WidgetCallback,
26 WidgetKwargs,
27 )
28 from streamlit.type_util import Key, OptionSequence, ensure_indexable, to_key
29 from streamlit.util import index_
30 from .form import current_form_id
31 from .utils import check_callback_rules, check_session_state_rules
32
33
34 class RadioMixin:
35 def radio(
36 self,
37 label: str,
38 options: OptionSequence,
39 index: int = 0,
40 format_func: Callable[[Any], Any] = str,
41 key: Optional[Key] = None,
42 help: Optional[str] = None,
43 on_change: Optional[WidgetCallback] = None,
44 args: Optional[WidgetArgs] = None,
45 kwargs: Optional[WidgetKwargs] = None,
46 *, # keyword-only args:
47 disabled: bool = False,
48 horizontal: bool = False,
49 ) -> Any:
50 """Display a radio button widget.
51
52 Parameters
53 ----------
54 label : str
55 A short label explaining to the user what this radio group is for.
56 options : Sequence, numpy.ndarray, pandas.Series, pandas.DataFrame, or pandas.Index
57 Labels for the radio options. This will be cast to str internally
58 by default. For pandas.DataFrame, the first column is selected.
59 index : int
60 The index of the preselected option on first render.
61 format_func : function
62 Function to modify the display of radio options. It receives
63 the raw option as an argument and should output the label to be
64 shown for that option. This has no impact on the return value of
65 the radio.
66 key : str or int
67 An optional string or integer to use as the unique key for the widget.
68 If this is omitted, a key will be generated for the widget
69 based on its content. Multiple widgets of the same type may
70 not share the same key.
71 help : str
72 An optional tooltip that gets displayed next to the radio.
73 on_change : callable
74 An optional callback invoked when this radio's value changes.
75 args : tuple
76 An optional tuple of args to pass to the callback.
77 kwargs : dict
78 An optional dict of kwargs to pass to the callback.
79 disabled : bool
80 An optional boolean, which disables the radio button if set to
81 True. The default is False. This argument can only be supplied by
82 keyword.
83 horizontal : bool
84 An optional boolean, which orients the radio group horizontally.
85 The default is false (vertical buttons). This argument can only
86 be supplied by keyword.
87
88 Returns
89 -------
90 any
91 The selected option.
92
93 Example
94 -------
95 >>> genre = st.radio(
96 ... "What\'s your favorite movie genre",
97 ... ('Comedy', 'Drama', 'Documentary'))
98 >>>
99 >>> if genre == 'Comedy':
100 ... st.write('You selected comedy.')
101 ... else:
102 ... st.write("You didn\'t select comedy.")
103
104 .. output::
105 https://doc-radio.streamlitapp.com/
106 height: 260px
107
108 """
109 ctx = get_script_run_ctx()
110 return self._radio(
111 label=label,
112 options=options,
113 index=index,
114 format_func=format_func,
115 key=key,
116 help=help,
117 on_change=on_change,
118 args=args,
119 kwargs=kwargs,
120 disabled=disabled,
121 horizontal=horizontal,
122 ctx=ctx,
123 )
124
125 def _radio(
126 self,
127 label: str,
128 options: OptionSequence,
129 index: int = 0,
130 format_func: Callable[[Any], Any] = str,
131 key: Optional[Key] = None,
132 help: Optional[str] = None,
133 on_change: Optional[WidgetCallback] = None,
134 args: Optional[WidgetArgs] = None,
135 kwargs: Optional[WidgetKwargs] = None,
136 *, # keyword-only args:
137 disabled: bool = False,
138 horizontal: bool = False,
139 ctx: Optional[ScriptRunContext],
140 ) -> Any:
141 key = to_key(key)
142 check_callback_rules(self.dg, on_change)
143 check_session_state_rules(default_value=None if index == 0 else index, key=key)
144
145 opt = ensure_indexable(options)
146
147 if not isinstance(index, int):
148 raise StreamlitAPIException(
149 "Radio Value has invalid type: %s" % type(index).__name__
150 )
151
152 if len(opt) > 0 and not 0 <= index < len(opt):
153 raise StreamlitAPIException(
154 "Radio index must be between 0 and length of options"
155 )
156
157 radio_proto = RadioProto()
158 radio_proto.label = label
159 radio_proto.default = index
160 radio_proto.options[:] = [str(format_func(option)) for option in opt]
161 radio_proto.form_id = current_form_id(self.dg)
162 radio_proto.horizontal = horizontal
163 if help is not None:
164 radio_proto.help = dedent(help)
165
166 def deserialize_radio(ui_value, widget_id=""):
167 idx = ui_value if ui_value is not None else index
168
169 return opt[idx] if len(opt) > 0 and opt[idx] is not None else None
170
171 def serialize_radio(v):
172 if len(options) == 0:
173 return 0
174 return index_(options, v)
175
176 widget_state = register_widget(
177 "radio",
178 radio_proto,
179 user_key=key,
180 on_change_handler=on_change,
181 args=args,
182 kwargs=kwargs,
183 deserializer=deserialize_radio,
184 serializer=serialize_radio,
185 ctx=ctx,
186 )
187
188 # This needs to be done after register_widget because we don't want
189 # the following proto fields to affect a widget's ID.
190 radio_proto.disabled = disabled
191 if widget_state.value_changed:
192 radio_proto.value = serialize_radio(widget_state.value)
193 radio_proto.set_value = True
194
195 self.dg._enqueue("radio", radio_proto)
196 return widget_state.value
197
198 @property
199 def dg(self) -> "streamlit.delta_generator.DeltaGenerator":
200 """Get our DeltaGenerator."""
201 return cast("streamlit.delta_generator.DeltaGenerator", self)
202
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/e2e/scripts/st_radio.py b/e2e/scripts/st_radio.py
--- a/e2e/scripts/st_radio.py
+++ b/e2e/scripts/st_radio.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import pandas as pd
import streamlit as st
options = ("female", "male")
@@ -30,11 +31,15 @@
i5 = st.radio("radio 5", options, horizontal=True)
st.write("value 5:", i5)
+i6 = st.radio("radio 6", pd.DataFrame({"foo": list(options)}))
+st.write("value 6:", i6)
+
+
if st._is_running_with_streamlit:
def on_change():
st.session_state.radio_changed = True
- st.radio("radio 6", options, 1, key="radio6", on_change=on_change)
- st.write("value 6:", st.session_state.radio6)
+ st.radio("radio 7", options, 1, key="radio7", on_change=on_change)
+ st.write("value 7:", st.session_state.radio7)
st.write("radio changed:", "radio_changed" in st.session_state)
diff --git a/lib/streamlit/elements/radio.py b/lib/streamlit/elements/radio.py
--- a/lib/streamlit/elements/radio.py
+++ b/lib/streamlit/elements/radio.py
@@ -169,9 +169,9 @@
return opt[idx] if len(opt) > 0 and opt[idx] is not None else None
def serialize_radio(v):
- if len(options) == 0:
+ if len(opt) == 0:
return 0
- return index_(options, v)
+ return index_(opt, v)
widget_state = register_widget(
"radio",
| {"golden_diff": "diff --git a/e2e/scripts/st_radio.py b/e2e/scripts/st_radio.py\n--- a/e2e/scripts/st_radio.py\n+++ b/e2e/scripts/st_radio.py\n@@ -12,6 +12,7 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+import pandas as pd\n import streamlit as st\n \n options = (\"female\", \"male\")\n@@ -30,11 +31,15 @@\n i5 = st.radio(\"radio 5\", options, horizontal=True)\n st.write(\"value 5:\", i5)\n \n+i6 = st.radio(\"radio 6\", pd.DataFrame({\"foo\": list(options)}))\n+st.write(\"value 6:\", i6)\n+\n+\n if st._is_running_with_streamlit:\n \n def on_change():\n st.session_state.radio_changed = True\n \n- st.radio(\"radio 6\", options, 1, key=\"radio6\", on_change=on_change)\n- st.write(\"value 6:\", st.session_state.radio6)\n+ st.radio(\"radio 7\", options, 1, key=\"radio7\", on_change=on_change)\n+ st.write(\"value 7:\", st.session_state.radio7)\n st.write(\"radio changed:\", \"radio_changed\" in st.session_state)\ndiff --git a/lib/streamlit/elements/radio.py b/lib/streamlit/elements/radio.py\n--- a/lib/streamlit/elements/radio.py\n+++ b/lib/streamlit/elements/radio.py\n@@ -169,9 +169,9 @@\n return opt[idx] if len(opt) > 0 and opt[idx] is not None else None\n \n def serialize_radio(v):\n- if len(options) == 0:\n+ if len(opt) == 0:\n return 0\n- return index_(options, v)\n+ return index_(opt, v)\n \n widget_state = register_widget(\n \"radio\",\n", "issue": "st.radio with DataFrame fails on rerun\n### Summary\r\n\r\nWhen you pass a DataFrame as the options in an st.radio, every rerun throws an error (but the first run works).\r\n\r\n### Steps to reproduce\r\n\r\nCode snippet:\r\n\r\n```\r\nimport streamlit as st\r\nimport pandas as pd\r\n\r\ndf = pd.DataFrame({'foo': ['one', 'two']})\r\nst.radio('Foo', df)\r\n```\r\n\r\n1. Run the code above.\r\n2. 
Press \"R\" to rerun the code above.\r\n\r\n**Expected behavior:**\r\n\r\nThe rerun works, just like the first run.\r\n\r\n**Actual behavior:**\r\n\r\nThe app hangs (stays in running state forever) and shows the error below in the terminal:\r\n\r\n```\r\nException in thread ScriptRunner.scriptThread:\r\nTraceback (most recent call last):\r\n File \"/usr/local/Cellar/[email protected]/3.9.5/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py\", line 954, in _bootstrap_inner\r\n self.run()\r\n File \"/usr/local/Cellar/[email protected]/3.9.5/Frameworks/Python.framework/Versions/3.9/lib/python3.9/threading.py\", line 892, in run\r\n self._target(*self._args, **self._kwargs)\r\n File \"/Users/[HIDDEN]/.venv/lib/python3.9/site-packages/streamlit/script_runner.py\", line 210, in _process_request_queue\r\n widget_states = self._session_state.as_widget_states()\r\n File \"/Users/[HIDDEN]/.venv/lib/python3.9/site-packages/streamlit/state/session_state.py\", line 560, in as_widget_states\r\n return self._new_widget_state.as_widget_states()\r\n File \"/Users/[HIDDEN]/.venv/lib/python3.9/site-packages/streamlit/state/session_state.py\", line 211, in as_widget_states\r\n states = [\r\n File \"/Users/[HIDDEN]/.venv/lib/python3.9/site-packages/streamlit/state/session_state.py\", line 214, in <listcomp>\r\n if self.get_serialized(widget_id)\r\n File \"/Users/[HIDDEN]/.venv/lib/python3.9/site-packages/streamlit/state/session_state.py\", line 190, in get_serialized\r\n serialized = metadata.serializer(item.value)\r\n File \"/Users/[HIDDEN]/.venv/lib/python3.9/site-packages/streamlit/elements/radio.py\", line 136, in serialize_radio\r\n return index_(options, v)\r\n File \"/Users/[HIDDEN]/.venv/lib/python3.9/site-packages/streamlit/util.py\", line 129, in index_\r\n raise ValueError(\"{} is not in iterable\".format(str(x)))\r\nValueError: one is not in iterable\r\n```\r\n\r\n\r\n### Is this a regression?\r\n\r\nyes \r\n\r\nPrevious known working version = 0.84.0\r\n\r\n### Debug info\r\n\r\n- Streamlit version: 1.4.0\r\n- Python version: 3.9.5\r\n\r\n### Additional information\r\n\r\nA meta-bug related to this: I'm not sure why this error is thrown in the terminal rather than inside the Streamlit app. Previously, our goal was to have _every_ error appear in the app, so you never had to check the terminal. 
It would be great to see if some code change unexpectedly changed this behavior.\n", "before_files": [{"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport streamlit as st\n\noptions = (\"female\", \"male\")\ni1 = st.radio(\"radio 1\", options, 1)\nst.write(\"value 1:\", i1)\n\ni2 = st.radio(\"radio 2\", options, 0, format_func=lambda x: x.capitalize())\nst.write(\"value 2:\", i2)\n\ni3 = st.radio(\"radio 3\", [])\nst.write(\"value 3:\", i3)\n\ni4 = st.radio(\"radio 4\", options, disabled=True)\nst.write(\"value 4:\", i4)\n\ni5 = st.radio(\"radio 5\", options, horizontal=True)\nst.write(\"value 5:\", i5)\n\nif st._is_running_with_streamlit:\n\n def on_change():\n st.session_state.radio_changed = True\n\n st.radio(\"radio 6\", options, 1, key=\"radio6\", on_change=on_change)\n st.write(\"value 6:\", st.session_state.radio6)\n st.write(\"radio changed:\", \"radio_changed\" in st.session_state)\n", "path": "e2e/scripts/st_radio.py"}, {"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom textwrap import dedent\nfrom typing import Any, Callable, Optional, cast\n\nimport streamlit\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto.Radio_pb2 import Radio as RadioProto\nfrom streamlit.scriptrunner import ScriptRunContext, get_script_run_ctx\nfrom streamlit.state import (\n register_widget,\n WidgetArgs,\n WidgetCallback,\n WidgetKwargs,\n)\nfrom streamlit.type_util import Key, OptionSequence, ensure_indexable, to_key\nfrom streamlit.util import index_\nfrom .form import current_form_id\nfrom .utils import check_callback_rules, check_session_state_rules\n\n\nclass RadioMixin:\n def radio(\n self,\n label: str,\n options: OptionSequence,\n index: int = 0,\n format_func: Callable[[Any], Any] = str,\n key: Optional[Key] = None,\n help: Optional[str] = None,\n on_change: Optional[WidgetCallback] = None,\n args: Optional[WidgetArgs] = None,\n kwargs: Optional[WidgetKwargs] = None,\n *, # keyword-only args:\n disabled: bool = False,\n horizontal: bool = False,\n ) -> Any:\n \"\"\"Display a radio button widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this radio group is for.\n options : Sequence, numpy.ndarray, pandas.Series, pandas.DataFrame, or pandas.Index\n Labels for the radio options. This will be cast to str internally\n by default. 
For pandas.DataFrame, the first column is selected.\n index : int\n The index of the preselected option on first render.\n format_func : function\n Function to modify the display of radio options. It receives\n the raw option as an argument and should output the label to be\n shown for that option. This has no impact on the return value of\n the radio.\n key : str or int\n An optional string or integer to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n help : str\n An optional tooltip that gets displayed next to the radio.\n on_change : callable\n An optional callback invoked when this radio's value changes.\n args : tuple\n An optional tuple of args to pass to the callback.\n kwargs : dict\n An optional dict of kwargs to pass to the callback.\n disabled : bool\n An optional boolean, which disables the radio button if set to\n True. The default is False. This argument can only be supplied by\n keyword.\n horizontal : bool\n An optional boolean, which orients the radio group horizontally.\n The default is false (vertical buttons). This argument can only\n be supplied by keyword.\n\n Returns\n -------\n any\n The selected option.\n\n Example\n -------\n >>> genre = st.radio(\n ... \"What\\'s your favorite movie genre\",\n ... ('Comedy', 'Drama', 'Documentary'))\n >>>\n >>> if genre == 'Comedy':\n ... st.write('You selected comedy.')\n ... else:\n ... st.write(\"You didn\\'t select comedy.\")\n\n .. output::\n https://doc-radio.streamlitapp.com/\n height: 260px\n\n \"\"\"\n ctx = get_script_run_ctx()\n return self._radio(\n label=label,\n options=options,\n index=index,\n format_func=format_func,\n key=key,\n help=help,\n on_change=on_change,\n args=args,\n kwargs=kwargs,\n disabled=disabled,\n horizontal=horizontal,\n ctx=ctx,\n )\n\n def _radio(\n self,\n label: str,\n options: OptionSequence,\n index: int = 0,\n format_func: Callable[[Any], Any] = str,\n key: Optional[Key] = None,\n help: Optional[str] = None,\n on_change: Optional[WidgetCallback] = None,\n args: Optional[WidgetArgs] = None,\n kwargs: Optional[WidgetKwargs] = None,\n *, # keyword-only args:\n disabled: bool = False,\n horizontal: bool = False,\n ctx: Optional[ScriptRunContext],\n ) -> Any:\n key = to_key(key)\n check_callback_rules(self.dg, on_change)\n check_session_state_rules(default_value=None if index == 0 else index, key=key)\n\n opt = ensure_indexable(options)\n\n if not isinstance(index, int):\n raise StreamlitAPIException(\n \"Radio Value has invalid type: %s\" % type(index).__name__\n )\n\n if len(opt) > 0 and not 0 <= index < len(opt):\n raise StreamlitAPIException(\n \"Radio index must be between 0 and length of options\"\n )\n\n radio_proto = RadioProto()\n radio_proto.label = label\n radio_proto.default = index\n radio_proto.options[:] = [str(format_func(option)) for option in opt]\n radio_proto.form_id = current_form_id(self.dg)\n radio_proto.horizontal = horizontal\n if help is not None:\n radio_proto.help = dedent(help)\n\n def deserialize_radio(ui_value, widget_id=\"\"):\n idx = ui_value if ui_value is not None else index\n\n return opt[idx] if len(opt) > 0 and opt[idx] is not None else None\n\n def serialize_radio(v):\n if len(options) == 0:\n return 0\n return index_(options, v)\n\n widget_state = register_widget(\n \"radio\",\n radio_proto,\n user_key=key,\n on_change_handler=on_change,\n args=args,\n kwargs=kwargs,\n deserializer=deserialize_radio,\n 
serializer=serialize_radio,\n ctx=ctx,\n )\n\n # This needs to be done after register_widget because we don't want\n # the following proto fields to affect a widget's ID.\n radio_proto.disabled = disabled\n if widget_state.value_changed:\n radio_proto.value = serialize_radio(widget_state.value)\n radio_proto.set_value = True\n\n self.dg._enqueue(\"radio\", radio_proto)\n return widget_state.value\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/radio.py"}], "after_files": [{"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pandas as pd\nimport streamlit as st\n\noptions = (\"female\", \"male\")\ni1 = st.radio(\"radio 1\", options, 1)\nst.write(\"value 1:\", i1)\n\ni2 = st.radio(\"radio 2\", options, 0, format_func=lambda x: x.capitalize())\nst.write(\"value 2:\", i2)\n\ni3 = st.radio(\"radio 3\", [])\nst.write(\"value 3:\", i3)\n\ni4 = st.radio(\"radio 4\", options, disabled=True)\nst.write(\"value 4:\", i4)\n\ni5 = st.radio(\"radio 5\", options, horizontal=True)\nst.write(\"value 5:\", i5)\n\ni6 = st.radio(\"radio 6\", pd.DataFrame({\"foo\": list(options)}))\nst.write(\"value 6:\", i6)\n\n\nif st._is_running_with_streamlit:\n\n def on_change():\n st.session_state.radio_changed = True\n\n st.radio(\"radio 7\", options, 1, key=\"radio7\", on_change=on_change)\n st.write(\"value 7:\", st.session_state.radio7)\n st.write(\"radio changed:\", \"radio_changed\" in st.session_state)\n", "path": "e2e/scripts/st_radio.py"}, {"content": "# Copyright 2018-2022 Streamlit Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom textwrap import dedent\nfrom typing import Any, Callable, Optional, cast\n\nimport streamlit\nfrom streamlit.errors import StreamlitAPIException\nfrom streamlit.proto.Radio_pb2 import Radio as RadioProto\nfrom streamlit.scriptrunner import ScriptRunContext, get_script_run_ctx\nfrom streamlit.state import (\n register_widget,\n WidgetArgs,\n WidgetCallback,\n WidgetKwargs,\n)\nfrom streamlit.type_util import Key, OptionSequence, ensure_indexable, to_key\nfrom streamlit.util import index_\nfrom .form import current_form_id\nfrom .utils import check_callback_rules, check_session_state_rules\n\n\nclass RadioMixin:\n def radio(\n self,\n label: str,\n options: OptionSequence,\n index: int = 0,\n format_func: Callable[[Any], Any] = str,\n key: Optional[Key] = 
None,\n help: Optional[str] = None,\n on_change: Optional[WidgetCallback] = None,\n args: Optional[WidgetArgs] = None,\n kwargs: Optional[WidgetKwargs] = None,\n *, # keyword-only args:\n disabled: bool = False,\n horizontal: bool = False,\n ) -> Any:\n \"\"\"Display a radio button widget.\n\n Parameters\n ----------\n label : str\n A short label explaining to the user what this radio group is for.\n options : Sequence, numpy.ndarray, pandas.Series, pandas.DataFrame, or pandas.Index\n Labels for the radio options. This will be cast to str internally\n by default. For pandas.DataFrame, the first column is selected.\n index : int\n The index of the preselected option on first render.\n format_func : function\n Function to modify the display of radio options. It receives\n the raw option as an argument and should output the label to be\n shown for that option. This has no impact on the return value of\n the radio.\n key : str or int\n An optional string or integer to use as the unique key for the widget.\n If this is omitted, a key will be generated for the widget\n based on its content. Multiple widgets of the same type may\n not share the same key.\n help : str\n An optional tooltip that gets displayed next to the radio.\n on_change : callable\n An optional callback invoked when this radio's value changes.\n args : tuple\n An optional tuple of args to pass to the callback.\n kwargs : dict\n An optional dict of kwargs to pass to the callback.\n disabled : bool\n An optional boolean, which disables the radio button if set to\n True. The default is False. This argument can only be supplied by\n keyword.\n horizontal : bool\n An optional boolean, which orients the radio group horizontally.\n The default is false (vertical buttons). This argument can only\n be supplied by keyword.\n\n Returns\n -------\n any\n The selected option.\n\n Example\n -------\n >>> genre = st.radio(\n ... \"What\\'s your favorite movie genre\",\n ... ('Comedy', 'Drama', 'Documentary'))\n >>>\n >>> if genre == 'Comedy':\n ... st.write('You selected comedy.')\n ... else:\n ... st.write(\"You didn\\'t select comedy.\")\n\n .. 
output::\n https://doc-radio.streamlitapp.com/\n height: 260px\n\n \"\"\"\n ctx = get_script_run_ctx()\n return self._radio(\n label=label,\n options=options,\n index=index,\n format_func=format_func,\n key=key,\n help=help,\n on_change=on_change,\n args=args,\n kwargs=kwargs,\n disabled=disabled,\n horizontal=horizontal,\n ctx=ctx,\n )\n\n def _radio(\n self,\n label: str,\n options: OptionSequence,\n index: int = 0,\n format_func: Callable[[Any], Any] = str,\n key: Optional[Key] = None,\n help: Optional[str] = None,\n on_change: Optional[WidgetCallback] = None,\n args: Optional[WidgetArgs] = None,\n kwargs: Optional[WidgetKwargs] = None,\n *, # keyword-only args:\n disabled: bool = False,\n horizontal: bool = False,\n ctx: Optional[ScriptRunContext],\n ) -> Any:\n key = to_key(key)\n check_callback_rules(self.dg, on_change)\n check_session_state_rules(default_value=None if index == 0 else index, key=key)\n\n opt = ensure_indexable(options)\n\n if not isinstance(index, int):\n raise StreamlitAPIException(\n \"Radio Value has invalid type: %s\" % type(index).__name__\n )\n\n if len(opt) > 0 and not 0 <= index < len(opt):\n raise StreamlitAPIException(\n \"Radio index must be between 0 and length of options\"\n )\n\n radio_proto = RadioProto()\n radio_proto.label = label\n radio_proto.default = index\n radio_proto.options[:] = [str(format_func(option)) for option in opt]\n radio_proto.form_id = current_form_id(self.dg)\n radio_proto.horizontal = horizontal\n if help is not None:\n radio_proto.help = dedent(help)\n\n def deserialize_radio(ui_value, widget_id=\"\"):\n idx = ui_value if ui_value is not None else index\n\n return opt[idx] if len(opt) > 0 and opt[idx] is not None else None\n\n def serialize_radio(v):\n if len(opt) == 0:\n return 0\n return index_(opt, v)\n\n widget_state = register_widget(\n \"radio\",\n radio_proto,\n user_key=key,\n on_change_handler=on_change,\n args=args,\n kwargs=kwargs,\n deserializer=deserialize_radio,\n serializer=serialize_radio,\n ctx=ctx,\n )\n\n # This needs to be done after register_widget because we don't want\n # the following proto fields to affect a widget's ID.\n radio_proto.disabled = disabled\n if widget_state.value_changed:\n radio_proto.value = serialize_radio(widget_state.value)\n radio_proto.set_value = True\n\n self.dg._enqueue(\"radio\", radio_proto)\n return widget_state.value\n\n @property\n def dg(self) -> \"streamlit.delta_generator.DeltaGenerator\":\n \"\"\"Get our DeltaGenerator.\"\"\"\n return cast(\"streamlit.delta_generator.DeltaGenerator\", self)\n", "path": "lib/streamlit/elements/radio.py"}]} | 3,474 | 424 |
gh_patches_debug_15832 | rasdani/github-patches | git_diff | conan-io__conan-2963 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error with v1.4
Hello,
I have the following Conan recipe
```
# cat conanfile.txt
[requires]
bitprim-node-cint/0.10.0@bitprim/testing
[generators]
cmake
[options]
bitprim-node-cint:shared=True
bitprim-node-cint:currency=BCH
[imports]
bin, *.dll -> .
lib, *.so -> .
lib, *.dylib -> .
```
When I execute: `conan install .`
I get the following errors:
```
...
PROJECT: Generator txt created conanbuildinfo.txt
PROJECT: Generated conaninfo.txt
Traceback (most recent call last):
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/command.py", line 1182, in run
method(args[0][1:])
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/command.py", line 325, in install
install_folder=args.install_folder)
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/conan_api.py", line 77, in wrapper
return f(*args, **kwargs)
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/conan_api.py", line 465, in install
no_imports=no_imports)
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/manager.py", line 344, in install
run_imports(conanfile, install_folder, output)
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/importer.py", line 82, in run_imports
conanfile.imports()
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/loader_parse.py", line 184, in imports
conan_file.copy(*import_params)
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/importer.py", line 160, in __call__
excludes=excludes, keep_path=keep_path)
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/file_copier.py", line 83, in __call__
self._link_folders(src, dst, link_folders)
File "/home/fernando/.local/lib/python2.7/site-packages/conans/client/file_copier.py", line 149, in _link_folders
os.symlink(link, dst_link)
OSError: [Errno 2] No such file or directory
ERROR: [Errno 2] No such file or directory
```
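The last frame is `os.symlink(link, dst_link)` inside `file_copier.py`, and `[Errno 2]` is what `os.symlink` raises when the parent directory of the destination link has not been created yet. A small standalone sketch (temporary paths, unrelated to the recipe above) reproduces it:

```python
import os
import tempfile

base = tempfile.mkdtemp()
target = os.path.join(base, "real_folder")
os.makedirs(target)

# Destination link whose parent directory does not exist yet.
dst_link = os.path.join(base, "missing_parent", "the_link")
try:
    os.symlink(target, dst_link)
except OSError as exc:
    print(exc)  # [Errno 2] No such file or directory

# Creating the parent first makes the same call succeed, which is what the
# patch below does with mkdir(os.path.dirname(dst_link)).
os.makedirs(os.path.dirname(dst_link))
os.symlink(target, dst_link)
```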
```
$ conan --version
Conan version 1.4.0
$ python --version
Python 2.7.15
$ lsb_release -a
LSB Version: :core-4.1-amd64:core-4.1-noarch
Distributor ID: Fedora
Description: Fedora release 28 (Twenty Eight)
Release: 28
Codename: TwentyEight
```
It works fine with Conan 1.3.3.
Thanks and regards,
Fernando.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conans/client/file_copier.py`
Content:
```
1 import os
2 import fnmatch
3 import shutil
4 from collections import defaultdict
5
6 from conans import tools
7
8
9 def report_copied_files(copied, output):
10 ext_files = defaultdict(list)
11 for f in copied:
12 _, ext = os.path.splitext(f)
13 ext_files[ext].append(os.path.basename(f))
14
15 if not ext_files:
16 return False
17
18 for ext, files in ext_files.items():
19 files_str = (", ".join(files)) if len(files) < 5 else ""
20 file_or_files = "file" if len(files) == 1 else "files"
21 if not ext:
22 output.info("Copied %d %s: %s" % (len(files), file_or_files, files_str))
23 else:
24 output.info("Copied %d '%s' %s: %s" % (len(files), ext, file_or_files, files_str))
25 return True
26
27
28 class FileCopier(object):
29 """ main responsible of copying files from place to place:
30 package: build folder -> package folder
31 imports: package folder -> user folder
32 export: user folder -> store "export" folder
33 """
34 def __init__(self, root_source_folder, root_destination_folder, excluded=None):
35 """
36 Takes the base folders to copy resources src -> dst. These folders names
37 will not be used in the relative names while copying
38 param root_source_folder: The base folder to copy things from, typically the
39 store build folder
40 param root_destination_folder: The base folder to copy things to, typicall the
41 store package folder
42 """
43 self._base_src = root_source_folder
44 self._base_dst = root_destination_folder
45 self._copied = []
46 self._excluded = [root_destination_folder]
47 if excluded:
48 self._excluded.append(excluded)
49
50 def report(self, output):
51 return report_copied_files(self._copied, output)
52
53 def __call__(self, pattern, dst="", src="", keep_path=True, links=False, symlinks=None,
54 excludes=None, ignore_case=False):
55 """
56 param pattern: an fnmatch file pattern of the files that should be copied. Eg. *.dll
57 param dst: the destination local folder, wrt to current conanfile dir, to which
58 the files will be copied. Eg: "bin"
59 param src: the source folder in which those files will be searched. This folder
60 will be stripped from the dst name. Eg.: lib/Debug/x86
61 param keep_path: False if you want the relative paths to be maintained from
62 src to dst folders, or just drop. False is useful if you want
63 to collect e.g. many *.libs among many dirs into a single
64 lib dir
65 return: list of copied files
66 """
67 if symlinks is not None:
68 links = symlinks
69 # Check for ../ patterns and allow them
70 if pattern.startswith(".."):
71 rel_dir = os.path.abspath(os.path.join(self._base_src, pattern))
72 base_src = os.path.dirname(rel_dir)
73 pattern = os.path.basename(rel_dir)
74 else:
75 base_src = self._base_src
76
77 src = os.path.join(base_src, src)
78 dst = os.path.join(self._base_dst, dst)
79
80 files_to_copy, link_folders = self._filter_files(src, pattern, links, excludes,
81 ignore_case)
82 copied_files = self._copy_files(files_to_copy, src, dst, keep_path, links)
83 self._link_folders(src, dst, link_folders)
84 self._copied.extend(files_to_copy)
85 return copied_files
86
87 def _filter_files(self, src, pattern, links, excludes, ignore_case):
88
89 """ return a list of the files matching the patterns
90 The list will be relative path names wrt to the root src folder
91 """
92 filenames = []
93 linked_folders = []
94 for root, subfolders, files in os.walk(src, followlinks=True):
95 if root in self._excluded:
96 subfolders[:] = []
97 continue
98
99 if links and os.path.islink(root):
100 linked_folders.append(os.path.relpath(root, src))
101 subfolders[:] = []
102 continue
103 basename = os.path.basename(root)
104 # Skip git or svn subfolders
105 if basename in [".git", ".svn"]:
106 subfolders[:] = []
107 continue
108 if basename == "test_package": # DO NOT export test_package/build folder
109 try:
110 subfolders.remove("build")
111 except:
112 pass
113
114 relative_path = os.path.relpath(root, src)
115 for f in files:
116 relative_name = os.path.normpath(os.path.join(relative_path, f))
117 filenames.append(relative_name)
118
119 if ignore_case:
120 filenames = {f.lower(): f for f in filenames}
121 pattern = pattern.lower()
122
123 files_to_copy = fnmatch.filter(filenames, pattern)
124 if excludes:
125 if not isinstance(excludes, (tuple, list)):
126 excludes = (excludes, )
127 if ignore_case:
128 excludes = [e.lower() for e in excludes]
129 for exclude in excludes:
130 files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]
131
132 if ignore_case:
133 files_to_copy = [filenames[f] for f in files_to_copy]
134
135 return files_to_copy, linked_folders
136
137 @staticmethod
138 def _link_folders(src, dst, linked_folders):
139 for linked_folder in linked_folders:
140 link = os.readlink(os.path.join(src, linked_folder))
141 dst_link = os.path.join(dst, linked_folder)
142 try:
143 # Remove the previous symlink
144 os.remove(dst_link)
145 except OSError:
146 pass
147 # link is a string relative to linked_folder
148 # e.j: os.symlink("test/bar", "./foo/test_link") will create a link to foo/test/bar in ./foo/test_link
149 os.symlink(link, dst_link)
150 # Remove empty links
151 for linked_folder in linked_folders:
152 dst_link = os.path.join(dst, linked_folder)
153 abs_path = os.path.realpath(dst_link)
154 if not os.path.exists(abs_path):
155 os.remove(dst_link)
156
157 @staticmethod
158 def _copy_files(files, src, dst, keep_path, symlinks):
159 """ executes a multiple file copy from [(src_file, dst_file), (..)]
160 managing symlinks if necessary
161 """
162 copied_files = []
163 for filename in files:
164 abs_src_name = os.path.join(src, filename)
165 filename = filename if keep_path else os.path.basename(filename)
166 abs_dst_name = os.path.normpath(os.path.join(dst, filename))
167 try:
168 os.makedirs(os.path.dirname(abs_dst_name))
169 except:
170 pass
171 if symlinks and os.path.islink(abs_src_name):
172 linkto = os.readlink(abs_src_name) # @UndefinedVariable
173 try:
174 os.remove(abs_dst_name)
175 except OSError:
176 pass
177 os.symlink(linkto, abs_dst_name) # @UndefinedVariable
178 else:
179 shutil.copy2(abs_src_name, abs_dst_name)
180 copied_files.append(abs_dst_name)
181 return copied_files
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/conans/client/file_copier.py b/conans/client/file_copier.py
--- a/conans/client/file_copier.py
+++ b/conans/client/file_copier.py
@@ -3,7 +3,7 @@
import shutil
from collections import defaultdict
-from conans import tools
+from conans.util.files import mkdir
def report_copied_files(copied, output):
@@ -146,6 +146,7 @@
pass
# link is a string relative to linked_folder
# e.j: os.symlink("test/bar", "./foo/test_link") will create a link to foo/test/bar in ./foo/test_link
+ mkdir(os.path.dirname(dst_link))
os.symlink(link, dst_link)
# Remove empty links
for linked_folder in linked_folders:
| {"golden_diff": "diff --git a/conans/client/file_copier.py b/conans/client/file_copier.py\n--- a/conans/client/file_copier.py\n+++ b/conans/client/file_copier.py\n@@ -3,7 +3,7 @@\n import shutil\n from collections import defaultdict\n \n-from conans import tools\n+from conans.util.files import mkdir\n \n \n def report_copied_files(copied, output):\n@@ -146,6 +146,7 @@\n pass\n # link is a string relative to linked_folder\n # e.j: os.symlink(\"test/bar\", \"./foo/test_link\") will create a link to foo/test/bar in ./foo/test_link\n+ mkdir(os.path.dirname(dst_link))\n os.symlink(link, dst_link)\n # Remove empty links\n for linked_folder in linked_folders:\n", "issue": "Error with v1.4\nHello,\r\n\r\nI have the following Conan recipe\r\n\r\n```\r\n# cat conanfile.txt \r\n\r\n[requires]\r\nbitprim-node-cint/0.10.0@bitprim/testing\r\n[generators]\r\ncmake\r\n[options]\r\nbitprim-node-cint:shared=True\r\nbitprim-node-cint:currency=BCH\r\n[imports]\r\nbin, *.dll -> .\r\nlib, *.so -> .\r\nlib, *.dylib -> .\r\n``` \r\n\r\nWhen I execute: `conan install .` \r\nI get the following errors:\r\n\r\n```\r\n...\r\nPROJECT: Generator txt created conanbuildinfo.txt\r\nPROJECT: Generated conaninfo.txt\r\nTraceback (most recent call last):\r\n File \"/home/fernando/.local/lib/python2.7/site-packages/conans/client/command.py\", line 1182, in run\r\n method(args[0][1:])\r\n File \"/home/fernando/.local/lib/python2.7/site-packages/conans/client/command.py\", line 325, in install\r\n install_folder=args.install_folder)\r\n File \"/home/fernando/.local/lib/python2.7/site-packages/conans/client/conan_api.py\", line 77, in wrapper\r\n return f(*args, **kwargs)\r\n File \"/home/fernando/.local/lib/python2.7/site-packages/conans/client/conan_api.py\", line 465, in install\r\n no_imports=no_imports)\r\n File \"/home/fernando/.local/lib/python2.7/site-packages/conans/client/manager.py\", line 344, in install\r\n run_imports(conanfile, install_folder, output)\r\n File \"/home/fernando/.local/lib/python2.7/site-packages/conans/client/importer.py\", line 82, in run_imports\r\n conanfile.imports()\r\n File \"/home/fernando/.local/lib/python2.7/site-packages/conans/client/loader_parse.py\", line 184, in imports\r\n conan_file.copy(*import_params)\r\n File \"/home/fernando/.local/lib/python2.7/site-packages/conans/client/importer.py\", line 160, in __call__\r\n excludes=excludes, keep_path=keep_path)\r\n File \"/home/fernando/.local/lib/python2.7/site-packages/conans/client/file_copier.py\", line 83, in __call__\r\n self._link_folders(src, dst, link_folders)\r\n File \"/home/fernando/.local/lib/python2.7/site-packages/conans/client/file_copier.py\", line 149, in _link_folders\r\n os.symlink(link, dst_link)\r\nOSError: [Errno 2] No such file or directory\r\n\r\nERROR: [Errno 2] No such file or directory\r\n```\r\n\r\n```\r\n$ conan --version\r\nConan version 1.4.0\r\n\r\n$ python --version\r\nPython 2.7.15\r\n\r\n$ lsb_release -a\r\nLSB Version:\t:core-4.1-amd64:core-4.1-noarch\r\nDistributor ID:\tFedora\r\nDescription:\tFedora release 28 (Twenty Eight)\r\nRelease:\t28\r\nCodename:\tTwentyEight\r\n\r\n```\r\n\r\nIt works fine with Conan 1.3.3.\r\n\r\nThanks and regards,\r\nFernando.\n", "before_files": [{"content": "import os\nimport fnmatch\nimport shutil\nfrom collections import defaultdict\n\nfrom conans import tools\n\n\ndef report_copied_files(copied, output):\n ext_files = defaultdict(list)\n for f in copied:\n _, ext = os.path.splitext(f)\n ext_files[ext].append(os.path.basename(f))\n\n if not ext_files:\n return 
False\n\n for ext, files in ext_files.items():\n files_str = (\", \".join(files)) if len(files) < 5 else \"\"\n file_or_files = \"file\" if len(files) == 1 else \"files\"\n if not ext:\n output.info(\"Copied %d %s: %s\" % (len(files), file_or_files, files_str))\n else:\n output.info(\"Copied %d '%s' %s: %s\" % (len(files), ext, file_or_files, files_str))\n return True\n\n\nclass FileCopier(object):\n \"\"\" main responsible of copying files from place to place:\n package: build folder -> package folder\n imports: package folder -> user folder\n export: user folder -> store \"export\" folder\n \"\"\"\n def __init__(self, root_source_folder, root_destination_folder, excluded=None):\n \"\"\"\n Takes the base folders to copy resources src -> dst. These folders names\n will not be used in the relative names while copying\n param root_source_folder: The base folder to copy things from, typically the\n store build folder\n param root_destination_folder: The base folder to copy things to, typicall the\n store package folder\n \"\"\"\n self._base_src = root_source_folder\n self._base_dst = root_destination_folder\n self._copied = []\n self._excluded = [root_destination_folder]\n if excluded:\n self._excluded.append(excluded)\n\n def report(self, output):\n return report_copied_files(self._copied, output)\n\n def __call__(self, pattern, dst=\"\", src=\"\", keep_path=True, links=False, symlinks=None,\n excludes=None, ignore_case=False):\n \"\"\"\n param pattern: an fnmatch file pattern of the files that should be copied. Eg. *.dll\n param dst: the destination local folder, wrt to current conanfile dir, to which\n the files will be copied. Eg: \"bin\"\n param src: the source folder in which those files will be searched. This folder\n will be stripped from the dst name. Eg.: lib/Debug/x86\n param keep_path: False if you want the relative paths to be maintained from\n src to dst folders, or just drop. False is useful if you want\n to collect e.g. 
many *.libs among many dirs into a single\n lib dir\n return: list of copied files\n \"\"\"\n if symlinks is not None:\n links = symlinks\n # Check for ../ patterns and allow them\n if pattern.startswith(\"..\"):\n rel_dir = os.path.abspath(os.path.join(self._base_src, pattern))\n base_src = os.path.dirname(rel_dir)\n pattern = os.path.basename(rel_dir)\n else:\n base_src = self._base_src\n\n src = os.path.join(base_src, src)\n dst = os.path.join(self._base_dst, dst)\n\n files_to_copy, link_folders = self._filter_files(src, pattern, links, excludes,\n ignore_case)\n copied_files = self._copy_files(files_to_copy, src, dst, keep_path, links)\n self._link_folders(src, dst, link_folders)\n self._copied.extend(files_to_copy)\n return copied_files\n\n def _filter_files(self, src, pattern, links, excludes, ignore_case):\n\n \"\"\" return a list of the files matching the patterns\n The list will be relative path names wrt to the root src folder\n \"\"\"\n filenames = []\n linked_folders = []\n for root, subfolders, files in os.walk(src, followlinks=True):\n if root in self._excluded:\n subfolders[:] = []\n continue\n\n if links and os.path.islink(root):\n linked_folders.append(os.path.relpath(root, src))\n subfolders[:] = []\n continue\n basename = os.path.basename(root)\n # Skip git or svn subfolders\n if basename in [\".git\", \".svn\"]:\n subfolders[:] = []\n continue\n if basename == \"test_package\": # DO NOT export test_package/build folder\n try:\n subfolders.remove(\"build\")\n except:\n pass\n\n relative_path = os.path.relpath(root, src)\n for f in files:\n relative_name = os.path.normpath(os.path.join(relative_path, f))\n filenames.append(relative_name)\n\n if ignore_case:\n filenames = {f.lower(): f for f in filenames}\n pattern = pattern.lower()\n\n files_to_copy = fnmatch.filter(filenames, pattern)\n if excludes:\n if not isinstance(excludes, (tuple, list)):\n excludes = (excludes, )\n if ignore_case:\n excludes = [e.lower() for e in excludes]\n for exclude in excludes:\n files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]\n\n if ignore_case:\n files_to_copy = [filenames[f] for f in files_to_copy]\n\n return files_to_copy, linked_folders\n\n @staticmethod\n def _link_folders(src, dst, linked_folders):\n for linked_folder in linked_folders:\n link = os.readlink(os.path.join(src, linked_folder))\n dst_link = os.path.join(dst, linked_folder)\n try:\n # Remove the previous symlink\n os.remove(dst_link)\n except OSError:\n pass\n # link is a string relative to linked_folder\n # e.j: os.symlink(\"test/bar\", \"./foo/test_link\") will create a link to foo/test/bar in ./foo/test_link\n os.symlink(link, dst_link)\n # Remove empty links\n for linked_folder in linked_folders:\n dst_link = os.path.join(dst, linked_folder)\n abs_path = os.path.realpath(dst_link)\n if not os.path.exists(abs_path):\n os.remove(dst_link)\n\n @staticmethod\n def _copy_files(files, src, dst, keep_path, symlinks):\n \"\"\" executes a multiple file copy from [(src_file, dst_file), (..)]\n managing symlinks if necessary\n \"\"\"\n copied_files = []\n for filename in files:\n abs_src_name = os.path.join(src, filename)\n filename = filename if keep_path else os.path.basename(filename)\n abs_dst_name = os.path.normpath(os.path.join(dst, filename))\n try:\n os.makedirs(os.path.dirname(abs_dst_name))\n except:\n pass\n if symlinks and os.path.islink(abs_src_name):\n linkto = os.readlink(abs_src_name) # @UndefinedVariable\n try:\n os.remove(abs_dst_name)\n except OSError:\n pass\n os.symlink(linkto, 
abs_dst_name) # @UndefinedVariable\n else:\n shutil.copy2(abs_src_name, abs_dst_name)\n copied_files.append(abs_dst_name)\n return copied_files\n", "path": "conans/client/file_copier.py"}], "after_files": [{"content": "import os\nimport fnmatch\nimport shutil\nfrom collections import defaultdict\n\nfrom conans.util.files import mkdir\n\n\ndef report_copied_files(copied, output):\n ext_files = defaultdict(list)\n for f in copied:\n _, ext = os.path.splitext(f)\n ext_files[ext].append(os.path.basename(f))\n\n if not ext_files:\n return False\n\n for ext, files in ext_files.items():\n files_str = (\", \".join(files)) if len(files) < 5 else \"\"\n file_or_files = \"file\" if len(files) == 1 else \"files\"\n if not ext:\n output.info(\"Copied %d %s: %s\" % (len(files), file_or_files, files_str))\n else:\n output.info(\"Copied %d '%s' %s: %s\" % (len(files), ext, file_or_files, files_str))\n return True\n\n\nclass FileCopier(object):\n \"\"\" main responsible of copying files from place to place:\n package: build folder -> package folder\n imports: package folder -> user folder\n export: user folder -> store \"export\" folder\n \"\"\"\n def __init__(self, root_source_folder, root_destination_folder, excluded=None):\n \"\"\"\n Takes the base folders to copy resources src -> dst. These folders names\n will not be used in the relative names while copying\n param root_source_folder: The base folder to copy things from, typically the\n store build folder\n param root_destination_folder: The base folder to copy things to, typicall the\n store package folder\n \"\"\"\n self._base_src = root_source_folder\n self._base_dst = root_destination_folder\n self._copied = []\n self._excluded = [root_destination_folder]\n if excluded:\n self._excluded.append(excluded)\n\n def report(self, output):\n return report_copied_files(self._copied, output)\n\n def __call__(self, pattern, dst=\"\", src=\"\", keep_path=True, links=False, symlinks=None,\n excludes=None, ignore_case=False):\n \"\"\"\n param pattern: an fnmatch file pattern of the files that should be copied. Eg. *.dll\n param dst: the destination local folder, wrt to current conanfile dir, to which\n the files will be copied. Eg: \"bin\"\n param src: the source folder in which those files will be searched. This folder\n will be stripped from the dst name. Eg.: lib/Debug/x86\n param keep_path: False if you want the relative paths to be maintained from\n src to dst folders, or just drop. False is useful if you want\n to collect e.g. 
many *.libs among many dirs into a single\n lib dir\n return: list of copied files\n \"\"\"\n if symlinks is not None:\n links = symlinks\n # Check for ../ patterns and allow them\n if pattern.startswith(\"..\"):\n rel_dir = os.path.abspath(os.path.join(self._base_src, pattern))\n base_src = os.path.dirname(rel_dir)\n pattern = os.path.basename(rel_dir)\n else:\n base_src = self._base_src\n\n src = os.path.join(base_src, src)\n dst = os.path.join(self._base_dst, dst)\n\n files_to_copy, link_folders = self._filter_files(src, pattern, links, excludes,\n ignore_case)\n copied_files = self._copy_files(files_to_copy, src, dst, keep_path, links)\n self._link_folders(src, dst, link_folders)\n self._copied.extend(files_to_copy)\n return copied_files\n\n def _filter_files(self, src, pattern, links, excludes, ignore_case):\n\n \"\"\" return a list of the files matching the patterns\n The list will be relative path names wrt to the root src folder\n \"\"\"\n filenames = []\n linked_folders = []\n for root, subfolders, files in os.walk(src, followlinks=True):\n if root in self._excluded:\n subfolders[:] = []\n continue\n\n if links and os.path.islink(root):\n linked_folders.append(os.path.relpath(root, src))\n subfolders[:] = []\n continue\n basename = os.path.basename(root)\n # Skip git or svn subfolders\n if basename in [\".git\", \".svn\"]:\n subfolders[:] = []\n continue\n if basename == \"test_package\": # DO NOT export test_package/build folder\n try:\n subfolders.remove(\"build\")\n except:\n pass\n\n relative_path = os.path.relpath(root, src)\n for f in files:\n relative_name = os.path.normpath(os.path.join(relative_path, f))\n filenames.append(relative_name)\n\n if ignore_case:\n filenames = {f.lower(): f for f in filenames}\n pattern = pattern.lower()\n\n files_to_copy = fnmatch.filter(filenames, pattern)\n if excludes:\n if not isinstance(excludes, (tuple, list)):\n excludes = (excludes, )\n if ignore_case:\n excludes = [e.lower() for e in excludes]\n for exclude in excludes:\n files_to_copy = [f for f in files_to_copy if not fnmatch.fnmatch(f, exclude)]\n\n if ignore_case:\n files_to_copy = [filenames[f] for f in files_to_copy]\n\n return files_to_copy, linked_folders\n\n @staticmethod\n def _link_folders(src, dst, linked_folders):\n for linked_folder in linked_folders:\n link = os.readlink(os.path.join(src, linked_folder))\n dst_link = os.path.join(dst, linked_folder)\n try:\n # Remove the previous symlink\n os.remove(dst_link)\n except OSError:\n pass\n # link is a string relative to linked_folder\n # e.j: os.symlink(\"test/bar\", \"./foo/test_link\") will create a link to foo/test/bar in ./foo/test_link\n mkdir(os.path.dirname(dst_link))\n os.symlink(link, dst_link)\n # Remove empty links\n for linked_folder in linked_folders:\n dst_link = os.path.join(dst, linked_folder)\n abs_path = os.path.realpath(dst_link)\n if not os.path.exists(abs_path):\n os.remove(dst_link)\n\n @staticmethod\n def _copy_files(files, src, dst, keep_path, symlinks):\n \"\"\" executes a multiple file copy from [(src_file, dst_file), (..)]\n managing symlinks if necessary\n \"\"\"\n copied_files = []\n for filename in files:\n abs_src_name = os.path.join(src, filename)\n filename = filename if keep_path else os.path.basename(filename)\n abs_dst_name = os.path.normpath(os.path.join(dst, filename))\n try:\n os.makedirs(os.path.dirname(abs_dst_name))\n except:\n pass\n if symlinks and os.path.islink(abs_src_name):\n linkto = os.readlink(abs_src_name) # @UndefinedVariable\n try:\n os.remove(abs_dst_name)\n except 
OSError:\n pass\n os.symlink(linkto, abs_dst_name) # @UndefinedVariable\n else:\n shutil.copy2(abs_src_name, abs_dst_name)\n copied_files.append(abs_dst_name)\n return copied_files\n", "path": "conans/client/file_copier.py"}]} | 2,982 | 181 |
gh_patches_debug_16494 | rasdani/github-patches | git_diff | googleapis__google-api-python-client-1083 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add exception handling to docs
Hi :)
I was reading the [docs](https://github.com/googleapis/google-api-python-client/blob/master/docs/start.md) looking for an example of handling exceptions when `request.execute()` goes wrong, e.g. a 403 due to
exceeding quota limits.
I would like for the docs to be updated with a try: and except: like this
``` python
try:
    response = request.execute()
except HttpError as e:
    logger.error('Error response status code %d, reason %s:', e.resp.status, e.content)
    return {'error': 403, 'body': 'YouTube API Data v3 quota limit exceeded'}
```
or something else in the `except` block
If you're happy with this I'd like to contribute this as a first timer to open source?
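For reference, a sketch of the kind of handler being requested, using only attributes that `HttpError` already exposes in `googleapiclient/errors.py` below (`resp`, `content`, `error_details`); the logger and the surrounding function are assumptions, not library API:

```python
import logging

from googleapiclient.errors import HttpError

logger = logging.getLogger(__name__)


def execute_with_handling(request):
    try:
        return request.execute()
    except HttpError as e:
        # e.resp.status is the HTTP status code; error_details holds the parsed
        # "detail"/"details" payload when the API returned a structured error.
        logger.error("Request failed: status=%s details=%s", e.resp.status, e.error_details)
        raise
```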
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `googleapiclient/errors.py`
Content:
```
1 # Copyright 2014 Google Inc. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Errors for the library.
16
17 All exceptions defined by the library
18 should be defined in this file.
19 """
20 from __future__ import absolute_import
21
22 __author__ = "[email protected] (Joe Gregorio)"
23
24 import json
25
26 from googleapiclient import _helpers as util
27
28
29 class Error(Exception):
30 """Base error for this module."""
31
32 pass
33
34
35 class HttpError(Error):
36 """HTTP data was invalid or unexpected."""
37
38 @util.positional(3)
39 def __init__(self, resp, content, uri=None):
40 self.resp = resp
41 if not isinstance(content, bytes):
42 raise TypeError("HTTP content should be bytes")
43 self.content = content
44 self.uri = uri
45 self.error_details = ""
46
47 def _get_reason(self):
48 """Calculate the reason for the error from the response content."""
49 reason = self.resp.reason
50 try:
51 data = json.loads(self.content.decode("utf-8"))
52 if isinstance(data, dict):
53 reason = data["error"]["message"]
54 if "details" in data["error"]:
55 self.error_details = data["error"]["details"]
56 elif "detail" in data["error"]:
57 self.error_details = data["error"]["detail"]
58 elif isinstance(data, list) and len(data) > 0:
59 first_error = data[0]
60 reason = first_error["error"]["message"]
61 if "details" in first_error["error"]:
62 self.error_details = first_error["error"]["details"]
63 except (ValueError, KeyError, TypeError):
64 pass
65 if reason is None:
66 reason = ""
67 return reason
68
69 def __repr__(self):
70 reason = self._get_reason()
71 if self.error_details:
72 return '<HttpError %s when requesting %s returned "%s". Details: "%s">' % (
73 self.resp.status,
74 self.uri,
75 reason.strip(),
76 self.error_details,
77 )
78 elif self.uri:
79 return '<HttpError %s when requesting %s returned "%s">' % (
80 self.resp.status,
81 self.uri,
82 self._get_reason().strip(),
83 )
84 else:
85 return '<HttpError %s "%s">' % (self.resp.status, self._get_reason())
86
87 __str__ = __repr__
88
89
90 class InvalidJsonError(Error):
91 """The JSON returned could not be parsed."""
92
93 pass
94
95
96 class UnknownFileType(Error):
97 """File type unknown or unexpected."""
98
99 pass
100
101
102 class UnknownLinkType(Error):
103 """Link type unknown or unexpected."""
104
105 pass
106
107
108 class UnknownApiNameOrVersion(Error):
109 """No API with that name and version exists."""
110
111 pass
112
113
114 class UnacceptableMimeTypeError(Error):
115 """That is an unacceptable mimetype for this operation."""
116
117 pass
118
119
120 class MediaUploadSizeError(Error):
121 """Media is larger than the method can accept."""
122
123 pass
124
125
126 class ResumableUploadError(HttpError):
127 """Error occurred during resumable upload."""
128
129 pass
130
131
132 class InvalidChunkSizeError(Error):
133 """The given chunksize is not valid."""
134
135 pass
136
137
138 class InvalidNotificationError(Error):
139 """The channel Notification is invalid."""
140
141 pass
142
143
144 class BatchError(HttpError):
145 """Error occurred during batch operations."""
146
147 @util.positional(2)
148 def __init__(self, reason, resp=None, content=None):
149 self.resp = resp
150 self.content = content
151 self.reason = reason
152
153 def __repr__(self):
154 if getattr(self.resp, "status", None) is None:
155 return '<BatchError "%s">' % (self.reason)
156 else:
157 return '<BatchError %s "%s">' % (self.resp.status, self.reason)
158
159 __str__ = __repr__
160
161
162 class UnexpectedMethodError(Error):
163 """Exception raised by RequestMockBuilder on unexpected calls."""
164
165 @util.positional(1)
166 def __init__(self, methodId=None):
167 """Constructor for an UnexpectedMethodError."""
168 super(UnexpectedMethodError, self).__init__(
169 "Received unexpected call %s" % methodId
170 )
171
172
173 class UnexpectedBodyError(Error):
174 """Exception raised by RequestMockBuilder on unexpected bodies."""
175
176 def __init__(self, expected, provided):
177 """Constructor for an UnexpectedMethodError."""
178 super(UnexpectedBodyError, self).__init__(
179 "Expected: [%s] - Provided: [%s]" % (expected, provided)
180 )
181
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/googleapiclient/errors.py b/googleapiclient/errors.py
--- a/googleapiclient/errors.py
+++ b/googleapiclient/errors.py
@@ -51,10 +51,9 @@
data = json.loads(self.content.decode("utf-8"))
if isinstance(data, dict):
reason = data["error"]["message"]
- if "details" in data["error"]:
- self.error_details = data["error"]["details"]
- elif "detail" in data["error"]:
- self.error_details = data["error"]["detail"]
+ error_detail_keyword = next((kw for kw in ["detail", "details", "message"] if kw in data["error"]), "")
+ if error_detail_keyword:
+ self.error_details = data["error"][error_detail_keyword]
elif isinstance(data, list) and len(data) > 0:
first_error = data[0]
reason = first_error["error"]["message"]
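The replacement line picks the first matching key in a fixed precedence order ("detail", then "details", then "message") and falls back to an empty string; a quick illustration of that expression in isolation:

```python
def pick_detail(error):
    keyword = next((kw for kw in ["detail", "details", "message"] if kw in error), "")
    return error[keyword] if keyword else ""

print(pick_detail({"details": "d", "message": "m"}))  # 'd'  ("details" beats "message")
print(pick_detail({"detail": "x", "details": "d"}))   # 'x'  ("detail" is checked first)
print(pick_detail({}))                                # ''   (fallback when nothing matches)
```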
| {"golden_diff": "diff --git a/googleapiclient/errors.py b/googleapiclient/errors.py\n--- a/googleapiclient/errors.py\n+++ b/googleapiclient/errors.py\n@@ -51,10 +51,9 @@\n data = json.loads(self.content.decode(\"utf-8\"))\n if isinstance(data, dict):\n reason = data[\"error\"][\"message\"]\n- if \"details\" in data[\"error\"]:\n- self.error_details = data[\"error\"][\"details\"]\n- elif \"detail\" in data[\"error\"]:\n- self.error_details = data[\"error\"][\"detail\"]\n+ error_detail_keyword = next((kw for kw in [\"detail\", \"details\", \"message\"] if kw in data[\"error\"]), \"\")\n+ if error_detail_keyword:\n+ self.error_details = data[\"error\"][error_detail_keyword]\n elif isinstance(data, list) and len(data) > 0:\n first_error = data[0]\n reason = first_error[\"error\"][\"message\"]\n", "issue": "Add exception handling to docs\nHi :) \r\n\r\n\r\nI was reading the [docs](https://github.com/googleapis/google-api-python-client/blob/master/docs/start.md) looking for an example to handle exceptions from when request.execute() goes wrong e.g. a 403 due to \r\nExceeding qouta limits.\r\n\r\n\r\nI would like for the docs to be updated with a try: and except: like this\r\n``` python\r\n try:\r\n response = request.execute()\r\n except HttpError as e:\r\n logger.error('Error response status code %d, reason %s:', e.resp.status, e.content)\r\n return {'error': 403, 'body' : 'YouTube API Data v3 qouta limit exceeded'}\r\n```\r\nor something else in the `except` block\r\n \r\nIf you're happy with this I'd like to contribute this as a first timer to open source?\r\n\n", "before_files": [{"content": "# Copyright 2014 Google Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Errors for the library.\n\nAll exceptions defined by the library\nshould be defined in this file.\n\"\"\"\nfrom __future__ import absolute_import\n\n__author__ = \"[email protected] (Joe Gregorio)\"\n\nimport json\n\nfrom googleapiclient import _helpers as util\n\n\nclass Error(Exception):\n \"\"\"Base error for this module.\"\"\"\n\n pass\n\n\nclass HttpError(Error):\n \"\"\"HTTP data was invalid or unexpected.\"\"\"\n\n @util.positional(3)\n def __init__(self, resp, content, uri=None):\n self.resp = resp\n if not isinstance(content, bytes):\n raise TypeError(\"HTTP content should be bytes\")\n self.content = content\n self.uri = uri\n self.error_details = \"\"\n\n def _get_reason(self):\n \"\"\"Calculate the reason for the error from the response content.\"\"\"\n reason = self.resp.reason\n try:\n data = json.loads(self.content.decode(\"utf-8\"))\n if isinstance(data, dict):\n reason = data[\"error\"][\"message\"]\n if \"details\" in data[\"error\"]:\n self.error_details = data[\"error\"][\"details\"]\n elif \"detail\" in data[\"error\"]:\n self.error_details = data[\"error\"][\"detail\"]\n elif isinstance(data, list) and len(data) > 0:\n first_error = data[0]\n reason = first_error[\"error\"][\"message\"]\n if \"details\" in first_error[\"error\"]:\n self.error_details = 
first_error[\"error\"][\"details\"]\n except (ValueError, KeyError, TypeError):\n pass\n if reason is None:\n reason = \"\"\n return reason\n\n def __repr__(self):\n reason = self._get_reason()\n if self.error_details:\n return '<HttpError %s when requesting %s returned \"%s\". Details: \"%s\">' % (\n self.resp.status,\n self.uri,\n reason.strip(),\n self.error_details,\n )\n elif self.uri:\n return '<HttpError %s when requesting %s returned \"%s\">' % (\n self.resp.status,\n self.uri,\n self._get_reason().strip(),\n )\n else:\n return '<HttpError %s \"%s\">' % (self.resp.status, self._get_reason())\n\n __str__ = __repr__\n\n\nclass InvalidJsonError(Error):\n \"\"\"The JSON returned could not be parsed.\"\"\"\n\n pass\n\n\nclass UnknownFileType(Error):\n \"\"\"File type unknown or unexpected.\"\"\"\n\n pass\n\n\nclass UnknownLinkType(Error):\n \"\"\"Link type unknown or unexpected.\"\"\"\n\n pass\n\n\nclass UnknownApiNameOrVersion(Error):\n \"\"\"No API with that name and version exists.\"\"\"\n\n pass\n\n\nclass UnacceptableMimeTypeError(Error):\n \"\"\"That is an unacceptable mimetype for this operation.\"\"\"\n\n pass\n\n\nclass MediaUploadSizeError(Error):\n \"\"\"Media is larger than the method can accept.\"\"\"\n\n pass\n\n\nclass ResumableUploadError(HttpError):\n \"\"\"Error occurred during resumable upload.\"\"\"\n\n pass\n\n\nclass InvalidChunkSizeError(Error):\n \"\"\"The given chunksize is not valid.\"\"\"\n\n pass\n\n\nclass InvalidNotificationError(Error):\n \"\"\"The channel Notification is invalid.\"\"\"\n\n pass\n\n\nclass BatchError(HttpError):\n \"\"\"Error occurred during batch operations.\"\"\"\n\n @util.positional(2)\n def __init__(self, reason, resp=None, content=None):\n self.resp = resp\n self.content = content\n self.reason = reason\n\n def __repr__(self):\n if getattr(self.resp, \"status\", None) is None:\n return '<BatchError \"%s\">' % (self.reason)\n else:\n return '<BatchError %s \"%s\">' % (self.resp.status, self.reason)\n\n __str__ = __repr__\n\n\nclass UnexpectedMethodError(Error):\n \"\"\"Exception raised by RequestMockBuilder on unexpected calls.\"\"\"\n\n @util.positional(1)\n def __init__(self, methodId=None):\n \"\"\"Constructor for an UnexpectedMethodError.\"\"\"\n super(UnexpectedMethodError, self).__init__(\n \"Received unexpected call %s\" % methodId\n )\n\n\nclass UnexpectedBodyError(Error):\n \"\"\"Exception raised by RequestMockBuilder on unexpected bodies.\"\"\"\n\n def __init__(self, expected, provided):\n \"\"\"Constructor for an UnexpectedMethodError.\"\"\"\n super(UnexpectedBodyError, self).__init__(\n \"Expected: [%s] - Provided: [%s]\" % (expected, provided)\n )\n", "path": "googleapiclient/errors.py"}], "after_files": [{"content": "# Copyright 2014 Google Inc. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Errors for the library.\n\nAll exceptions defined by the library\nshould be defined in this file.\n\"\"\"\nfrom __future__ import absolute_import\n\n__author__ = \"[email protected] (Joe Gregorio)\"\n\nimport json\n\nfrom googleapiclient import _helpers as util\n\n\nclass Error(Exception):\n \"\"\"Base error for this module.\"\"\"\n\n pass\n\n\nclass HttpError(Error):\n \"\"\"HTTP data was invalid or unexpected.\"\"\"\n\n @util.positional(3)\n def __init__(self, resp, content, uri=None):\n self.resp = resp\n if not isinstance(content, bytes):\n raise TypeError(\"HTTP content should be bytes\")\n self.content = content\n self.uri = uri\n self.error_details = \"\"\n\n def _get_reason(self):\n \"\"\"Calculate the reason for the error from the response content.\"\"\"\n reason = self.resp.reason\n try:\n data = json.loads(self.content.decode(\"utf-8\"))\n if isinstance(data, dict):\n reason = data[\"error\"][\"message\"]\n error_detail_keyword = next((kw for kw in [\"detail\", \"details\", \"message\"] if kw in data[\"error\"]), \"\")\n if error_detail_keyword:\n self.error_details = data[\"error\"][error_detail_keyword]\n elif isinstance(data, list) and len(data) > 0:\n first_error = data[0]\n reason = first_error[\"error\"][\"message\"]\n if \"details\" in first_error[\"error\"]:\n self.error_details = first_error[\"error\"][\"details\"]\n except (ValueError, KeyError, TypeError):\n pass\n if reason is None:\n reason = \"\"\n return reason\n\n def __repr__(self):\n reason = self._get_reason()\n if self.error_details:\n return '<HttpError %s when requesting %s returned \"%s\". 
Details: \"%s\">' % (\n self.resp.status,\n self.uri,\n reason.strip(),\n self.error_details,\n )\n elif self.uri:\n return '<HttpError %s when requesting %s returned \"%s\">' % (\n self.resp.status,\n self.uri,\n self._get_reason().strip(),\n )\n else:\n return '<HttpError %s \"%s\">' % (self.resp.status, self._get_reason())\n\n __str__ = __repr__\n\n\nclass InvalidJsonError(Error):\n \"\"\"The JSON returned could not be parsed.\"\"\"\n\n pass\n\n\nclass UnknownFileType(Error):\n \"\"\"File type unknown or unexpected.\"\"\"\n\n pass\n\n\nclass UnknownLinkType(Error):\n \"\"\"Link type unknown or unexpected.\"\"\"\n\n pass\n\n\nclass UnknownApiNameOrVersion(Error):\n \"\"\"No API with that name and version exists.\"\"\"\n\n pass\n\n\nclass UnacceptableMimeTypeError(Error):\n \"\"\"That is an unacceptable mimetype for this operation.\"\"\"\n\n pass\n\n\nclass MediaUploadSizeError(Error):\n \"\"\"Media is larger than the method can accept.\"\"\"\n\n pass\n\n\nclass ResumableUploadError(HttpError):\n \"\"\"Error occurred during resumable upload.\"\"\"\n\n pass\n\n\nclass InvalidChunkSizeError(Error):\n \"\"\"The given chunksize is not valid.\"\"\"\n\n pass\n\n\nclass InvalidNotificationError(Error):\n \"\"\"The channel Notification is invalid.\"\"\"\n\n pass\n\n\nclass BatchError(HttpError):\n \"\"\"Error occurred during batch operations.\"\"\"\n\n @util.positional(2)\n def __init__(self, reason, resp=None, content=None):\n self.resp = resp\n self.content = content\n self.reason = reason\n\n def __repr__(self):\n if getattr(self.resp, \"status\", None) is None:\n return '<BatchError \"%s\">' % (self.reason)\n else:\n return '<BatchError %s \"%s\">' % (self.resp.status, self.reason)\n\n __str__ = __repr__\n\n\nclass UnexpectedMethodError(Error):\n \"\"\"Exception raised by RequestMockBuilder on unexpected calls.\"\"\"\n\n @util.positional(1)\n def __init__(self, methodId=None):\n \"\"\"Constructor for an UnexpectedMethodError.\"\"\"\n super(UnexpectedMethodError, self).__init__(\n \"Received unexpected call %s\" % methodId\n )\n\n\nclass UnexpectedBodyError(Error):\n \"\"\"Exception raised by RequestMockBuilder on unexpected bodies.\"\"\"\n\n def __init__(self, expected, provided):\n \"\"\"Constructor for an UnexpectedMethodError.\"\"\"\n super(UnexpectedBodyError, self).__init__(\n \"Expected: [%s] - Provided: [%s]\" % (expected, provided)\n )\n", "path": "googleapiclient/errors.py"}]} | 1,977 | 207 |
gh_patches_debug_14573 | rasdani/github-patches | git_diff | ethereum__web3.py-996 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add python 3.7 to CI tests
### What was wrong?
Python 3.7 is out, and we should include it in our testing.
### How can it be fixed?
Add Python 3.7 to our tox.ini and CircleCI config.
--- END ISSUE ---
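Supporting a new interpreter version usually also means advertising it in the package metadata, not just in CI. The snippet below is a hedged illustration only: the 3.7 classifier line is an assumed addition and is not part of the issue or of the patch shown further down.

```python
# Sketch only: trove classifiers that typically accompany CI support for a new
# Python version. The 3.7 entry is the assumed addition.
classifiers = [
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Programming Language :: Python :: 3.7',  # added alongside the tox/CircleCI change
]
```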
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from setuptools import (
4 find_packages,
5 setup,
6 )
7
8
9 setup(
10 name='web3',
11 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
12 version='4.5.0',
13 description="""Web3.py""",
14 long_description_markdown_filename='README.md',
15 author='Piper Merriam',
16 author_email='[email protected]',
17 url='https://github.com/ethereum/web3.py',
18 include_package_data=True,
19 install_requires=[
20 "toolz>=0.9.0,<1.0.0;implementation_name=='pypy'",
21 "cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'",
22 "eth-abi>=1.1.1,<2",
23 "eth-account>=0.2.1,<0.4.0",
24 "eth-utils>=1.0.1,<2.0.0",
25 "hexbytes>=0.1.0,<1.0.0",
26 "lru-dict>=1.1.6,<2.0.0",
27 "eth-hash[pycryptodome]",
28 "requests>=2.16.0,<3.0.0",
29 "websockets>=5.0.1,<6.0.0",
30 "pypiwin32>=223;platform_system=='Windows'",
31 ],
32 setup_requires=['setuptools-markdown'],
33 python_requires='>=3.5, <4',
34 extras_require={
35 'tester': [
36 "eth-tester[py-evm]==0.1.0-beta.30",
37 "py-geth>=2.0.1,<3.0.0",
38 ],
39 'testrpc': ["eth-testrpc>=1.3.3,<2.0.0"],
40 'linter': [
41 "flake8==3.4.1",
42 "isort>=4.2.15,<5",
43 ],
44 },
45 py_modules=['web3', 'ens'],
46 license="MIT",
47 zip_safe=False,
48 keywords='ethereum',
49 packages=find_packages(exclude=["tests", "tests.*"]),
50 classifiers=[
51 'Development Status :: 5 - Production/Stable',
52 'Intended Audience :: Developers',
53 'License :: OSI Approved :: MIT License',
54 'Natural Language :: English',
55 'Programming Language :: Python :: 3',
56 'Programming Language :: Python :: 3.5',
57 'Programming Language :: Python :: 3.6',
58 ],
59 )
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,14 +26,14 @@
"lru-dict>=1.1.6,<2.0.0",
"eth-hash[pycryptodome]",
"requests>=2.16.0,<3.0.0",
- "websockets>=5.0.1,<6.0.0",
+ "websockets>=6.0.0,<7.0.0",
"pypiwin32>=223;platform_system=='Windows'",
],
setup_requires=['setuptools-markdown'],
python_requires='>=3.5, <4',
extras_require={
'tester': [
- "eth-tester[py-evm]==0.1.0-beta.30",
+ "eth-tester[py-evm]==0.1.0-beta.31",
"py-geth>=2.0.1,<3.0.0",
],
'testrpc': ["eth-testrpc>=1.3.3,<2.0.0"],
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,14 +26,14 @@\n \"lru-dict>=1.1.6,<2.0.0\",\n \"eth-hash[pycryptodome]\",\n \"requests>=2.16.0,<3.0.0\",\n- \"websockets>=5.0.1,<6.0.0\",\n+ \"websockets>=6.0.0,<7.0.0\",\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n python_requires='>=3.5, <4',\n extras_require={\n 'tester': [\n- \"eth-tester[py-evm]==0.1.0-beta.30\",\n+ \"eth-tester[py-evm]==0.1.0-beta.31\",\n \"py-geth>=2.0.1,<3.0.0\",\n ],\n 'testrpc': [\"eth-testrpc>=1.3.3,<2.0.0\"],\n", "issue": "Add python 3.7 to CI tests\n### What was wrong?\r\npython 3.7 is out, and we should include it in our testing.\r\n\r\n\r\n### How can it be fixed?\r\n\r\nadd python 3.7 to our tox.ini & circleci config\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.\n version='4.5.0',\n description=\"\"\"Web3.py\"\"\",\n long_description_markdown_filename='README.md',\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"toolz>=0.9.0,<1.0.0;implementation_name=='pypy'\",\n \"cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'\",\n \"eth-abi>=1.1.1,<2\",\n \"eth-account>=0.2.1,<0.4.0\",\n \"eth-utils>=1.0.1,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"eth-hash[pycryptodome]\",\n \"requests>=2.16.0,<3.0.0\",\n \"websockets>=5.0.1,<6.0.0\",\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n python_requires='>=3.5, <4',\n extras_require={\n 'tester': [\n \"eth-tester[py-evm]==0.1.0-beta.30\",\n \"py-geth>=2.0.1,<3.0.0\",\n ],\n 'testrpc': [\"eth-testrpc>=1.3.3,<2.0.0\"],\n 'linter': [\n \"flake8==3.4.1\",\n \"isort>=4.2.15,<5\",\n ],\n },\n py_modules=['web3', 'ens'],\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. 
Use the 'bumpversion' utility.\n version='4.5.0',\n description=\"\"\"Web3.py\"\"\",\n long_description_markdown_filename='README.md',\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"toolz>=0.9.0,<1.0.0;implementation_name=='pypy'\",\n \"cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'\",\n \"eth-abi>=1.1.1,<2\",\n \"eth-account>=0.2.1,<0.4.0\",\n \"eth-utils>=1.0.1,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"eth-hash[pycryptodome]\",\n \"requests>=2.16.0,<3.0.0\",\n \"websockets>=6.0.0,<7.0.0\",\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n python_requires='>=3.5, <4',\n extras_require={\n 'tester': [\n \"eth-tester[py-evm]==0.1.0-beta.31\",\n \"py-geth>=2.0.1,<3.0.0\",\n ],\n 'testrpc': [\"eth-testrpc>=1.3.3,<2.0.0\"],\n 'linter': [\n \"flake8==3.4.1\",\n \"isort>=4.2.15,<5\",\n ],\n },\n py_modules=['web3', 'ens'],\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}]} | 989 | 249 |
gh_patches_debug_187 | rasdani/github-patches | git_diff | CTFd__CTFd-863 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
get_config return default
get_config(key) should probably be get_config(key, default=None). This helps in cases where you want different behavior if get_config returns None.
--- END ISSUE ---
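The signature the issue proposes can be sketched independently of CTFd's database-backed config layer, which is not shown in this excerpt. In the minimal sketch below, `_config_store` is a hypothetical stand-in for the real lookup, not CTFd's actual implementation.

```python
# Hypothetical stand-in for CTFd's config storage; the real get_config reads
# from the database and lives in CTFd.utils, outside this excerpt.
_config_store = {"ctf_theme": None}

def get_config(key, default=None):
    """Return the stored value for key, falling back to default when unset."""
    value = _config_store.get(key)
    return default if value is None else value

# Callers can then encode their fallback behaviour directly:
theme = get_config("ctf_theme", default="core")
```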
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `CTFd/__init__.py`
Content:
```
1 import sys
2 import os
3
4 from distutils.version import StrictVersion
5 from flask import Flask, Request
6 from werkzeug.utils import cached_property
7 from werkzeug.contrib.fixers import ProxyFix
8 from jinja2 import FileSystemLoader
9 from jinja2.sandbox import SandboxedEnvironment
10 from six.moves import input
11
12 from CTFd import utils
13 from CTFd.utils.migrations import migrations, migrate, upgrade, stamp, create_database
14 from CTFd.utils.sessions import CachingSessionInterface
15 from CTFd.utils.updates import update_check
16 from CTFd.utils.initialization import init_request_processors, init_template_filters, init_template_globals, init_logs
17 from CTFd.utils.events import socketio
18 from CTFd.plugins import init_plugins
19
20 # Hack to support Unicode in Python 2 properly
21 if sys.version_info[0] < 3:
22 reload(sys)
23 sys.setdefaultencoding("utf-8")
24
25 __version__ = '2.0.3'
26
27
28 class CTFdRequest(Request):
29 @cached_property
30 def path(self):
31 """
32 Hijack the original Flask request path because it does not account for subdirectory deployments in an intuitive
33 manner. We append script_root so that the path always points to the full path as seen in the browser.
34 e.g. /subdirectory/path/route vs /path/route
35
36 :return: string
37 """
38 return self.script_root + super(CTFdRequest, self).path
39
40
41 class CTFdFlask(Flask):
42 def __init__(self, *args, **kwargs):
43 """Overriden Jinja constructor setting a custom jinja_environment"""
44 self.jinja_environment = SandboxedBaseEnvironment
45 self.session_interface = CachingSessionInterface(key_prefix='session')
46 self.request_class = CTFdRequest
47 Flask.__init__(self, *args, **kwargs)
48
49 def create_jinja_environment(self):
50 """Overridden jinja environment constructor"""
51 return super(CTFdFlask, self).create_jinja_environment()
52
53
54 class SandboxedBaseEnvironment(SandboxedEnvironment):
55 """SandboxEnvironment that mimics the Flask BaseEnvironment"""
56 def __init__(self, app, **options):
57 if 'loader' not in options:
58 options['loader'] = app.create_global_jinja_loader()
59 # Disable cache entirely so that themes can be switched (#662)
60 # If the cache is enabled, switching themes will cause odd rendering errors
61 SandboxedEnvironment.__init__(self, cache_size=0, **options)
62 self.app = app
63
64
65 class ThemeLoader(FileSystemLoader):
66 """Custom FileSystemLoader that switches themes based on the configuration value"""
67 def __init__(self, searchpath, encoding='utf-8', followlinks=False):
68 super(ThemeLoader, self).__init__(searchpath, encoding, followlinks)
69 self.overriden_templates = {}
70
71 def get_source(self, environment, template):
72 # Check if the template has been overriden
73 if template in self.overriden_templates:
74 return self.overriden_templates[template], template, True
75
76 # Check if the template requested is for the admin panel
77 if template.startswith('admin/'):
78 template = template[6:] # Strip out admin/
79 template = "/".join(['admin', 'templates', template])
80 return super(ThemeLoader, self).get_source(environment, template)
81
82 # Load regular theme data
83 theme = utils.get_config('ctf_theme')
84 template = "/".join([theme, 'templates', template])
85 return super(ThemeLoader, self).get_source(environment, template)
86
87
88 def confirm_upgrade():
89 if sys.stdin.isatty():
90 print("/*\\ CTFd has updated and must update the database! /*\\")
91 print("/*\\ Please backup your database before proceeding! /*\\")
92 print("/*\\ CTFd maintainers are not responsible for any data loss! /*\\")
93 if input('Run database migrations (Y/N)').lower().strip() == 'y':
94 return True
95 else:
96 print('/*\\ Ignored database migrations... /*\\')
97 return False
98 else:
99 return True
100
101
102 def run_upgrade():
103 upgrade()
104 utils.set_config('ctf_version', __version__)
105
106
107 def create_app(config='CTFd.config.Config'):
108 app = CTFdFlask(__name__)
109 with app.app_context():
110 app.config.from_object(config)
111
112 theme_loader = ThemeLoader(os.path.join(app.root_path, 'themes'), followlinks=True)
113 app.jinja_loader = theme_loader
114
115 from CTFd.models import db, Teams, Solves, Challenges, Fails, Flags, Tags, Files, Tracking
116
117 url = create_database()
118
119 # This allows any changes to the SQLALCHEMY_DATABASE_URI to get pushed back in
120 # This is mostly so we can force MySQL's charset
121 app.config['SQLALCHEMY_DATABASE_URI'] = str(url)
122
123 # Register database
124 db.init_app(app)
125
126 # Register Flask-Migrate
127 migrations.init_app(app, db)
128
129 # Alembic sqlite support is lacking so we should just create_all anyway
130 if url.drivername.startswith('sqlite'):
131 db.create_all()
132 stamp()
133 else:
134 # This creates tables instead of db.create_all()
135 # Allows migrations to happen properly
136 upgrade()
137
138 from CTFd.models import ma
139
140 ma.init_app(app)
141
142 app.db = db
143 app.VERSION = __version__
144
145 from CTFd.cache import cache
146
147 cache.init_app(app)
148 app.cache = cache
149
150 # If you have multiple workers you must have a shared cache
151 socketio.init_app(
152 app,
153 async_mode=app.config.get('SOCKETIO_ASYNC_MODE'),
154 message_queue=app.config.get('CACHE_REDIS_URL')
155 )
156
157 if app.config.get('REVERSE_PROXY'):
158 app.wsgi_app = ProxyFix(app.wsgi_app)
159
160 version = utils.get_config('ctf_version')
161
162 # Upgrading from an older version of CTFd
163 if version and (StrictVersion(version) < StrictVersion(__version__)):
164 if confirm_upgrade():
165 run_upgrade()
166 else:
167 exit()
168
169 if not version:
170 utils.set_config('ctf_version', __version__)
171
172 if not utils.get_config('ctf_theme'):
173 utils.set_config('ctf_theme', 'core')
174
175 update_check(force=True)
176
177 init_request_processors(app)
178 init_template_filters(app)
179 init_template_globals(app)
180
181 # Importing here allows tests to use sensible names (e.g. api instead of api_bp)
182 from CTFd.views import views
183 from CTFd.teams import teams
184 from CTFd.users import users
185 from CTFd.challenges import challenges
186 from CTFd.scoreboard import scoreboard
187 from CTFd.auth import auth
188 from CTFd.admin import admin
189 from CTFd.api import api
190 from CTFd.events import events
191 from CTFd.errors import page_not_found, forbidden, general_error, gateway_error
192
193 app.register_blueprint(views)
194 app.register_blueprint(teams)
195 app.register_blueprint(users)
196 app.register_blueprint(challenges)
197 app.register_blueprint(scoreboard)
198 app.register_blueprint(auth)
199 app.register_blueprint(api)
200 app.register_blueprint(events)
201
202 app.register_blueprint(admin)
203
204 app.register_error_handler(404, page_not_found)
205 app.register_error_handler(403, forbidden)
206 app.register_error_handler(500, general_error)
207 app.register_error_handler(502, gateway_error)
208
209 init_logs(app)
210 init_plugins(app)
211
212 return app
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/CTFd/__init__.py b/CTFd/__init__.py
--- a/CTFd/__init__.py
+++ b/CTFd/__init__.py
@@ -22,7 +22,7 @@
reload(sys)
sys.setdefaultencoding("utf-8")
-__version__ = '2.0.3'
+__version__ = '2.0.4'
class CTFdRequest(Request):
| {"golden_diff": "diff --git a/CTFd/__init__.py b/CTFd/__init__.py\n--- a/CTFd/__init__.py\n+++ b/CTFd/__init__.py\n@@ -22,7 +22,7 @@\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n \n-__version__ = '2.0.3'\n+__version__ = '2.0.4'\n \n \n class CTFdRequest(Request):\n", "issue": "get_config return default\nget_config(key) should probably be get_config(key, default=None). This helps in some ideas where you want to do different behavior if get_config returns None. \n", "before_files": [{"content": "import sys\nimport os\n\nfrom distutils.version import StrictVersion\nfrom flask import Flask, Request\nfrom werkzeug.utils import cached_property\nfrom werkzeug.contrib.fixers import ProxyFix\nfrom jinja2 import FileSystemLoader\nfrom jinja2.sandbox import SandboxedEnvironment\nfrom six.moves import input\n\nfrom CTFd import utils\nfrom CTFd.utils.migrations import migrations, migrate, upgrade, stamp, create_database\nfrom CTFd.utils.sessions import CachingSessionInterface\nfrom CTFd.utils.updates import update_check\nfrom CTFd.utils.initialization import init_request_processors, init_template_filters, init_template_globals, init_logs\nfrom CTFd.utils.events import socketio\nfrom CTFd.plugins import init_plugins\n\n# Hack to support Unicode in Python 2 properly\nif sys.version_info[0] < 3:\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n\n__version__ = '2.0.3'\n\n\nclass CTFdRequest(Request):\n @cached_property\n def path(self):\n \"\"\"\n Hijack the original Flask request path because it does not account for subdirectory deployments in an intuitive\n manner. We append script_root so that the path always points to the full path as seen in the browser.\n e.g. /subdirectory/path/route vs /path/route\n\n :return: string\n \"\"\"\n return self.script_root + super(CTFdRequest, self).path\n\n\nclass CTFdFlask(Flask):\n def __init__(self, *args, **kwargs):\n \"\"\"Overriden Jinja constructor setting a custom jinja_environment\"\"\"\n self.jinja_environment = SandboxedBaseEnvironment\n self.session_interface = CachingSessionInterface(key_prefix='session')\n self.request_class = CTFdRequest\n Flask.__init__(self, *args, **kwargs)\n\n def create_jinja_environment(self):\n \"\"\"Overridden jinja environment constructor\"\"\"\n return super(CTFdFlask, self).create_jinja_environment()\n\n\nclass SandboxedBaseEnvironment(SandboxedEnvironment):\n \"\"\"SandboxEnvironment that mimics the Flask BaseEnvironment\"\"\"\n def __init__(self, app, **options):\n if 'loader' not in options:\n options['loader'] = app.create_global_jinja_loader()\n # Disable cache entirely so that themes can be switched (#662)\n # If the cache is enabled, switching themes will cause odd rendering errors\n SandboxedEnvironment.__init__(self, cache_size=0, **options)\n self.app = app\n\n\nclass ThemeLoader(FileSystemLoader):\n \"\"\"Custom FileSystemLoader that switches themes based on the configuration value\"\"\"\n def __init__(self, searchpath, encoding='utf-8', followlinks=False):\n super(ThemeLoader, self).__init__(searchpath, encoding, followlinks)\n self.overriden_templates = {}\n\n def get_source(self, environment, template):\n # Check if the template has been overriden\n if template in self.overriden_templates:\n return self.overriden_templates[template], template, True\n\n # Check if the template requested is for the admin panel\n if template.startswith('admin/'):\n template = template[6:] # Strip out admin/\n template = \"/\".join(['admin', 'templates', template])\n return super(ThemeLoader, self).get_source(environment, 
template)\n\n # Load regular theme data\n theme = utils.get_config('ctf_theme')\n template = \"/\".join([theme, 'templates', template])\n return super(ThemeLoader, self).get_source(environment, template)\n\n\ndef confirm_upgrade():\n if sys.stdin.isatty():\n print(\"/*\\\\ CTFd has updated and must update the database! /*\\\\\")\n print(\"/*\\\\ Please backup your database before proceeding! /*\\\\\")\n print(\"/*\\\\ CTFd maintainers are not responsible for any data loss! /*\\\\\")\n if input('Run database migrations (Y/N)').lower().strip() == 'y':\n return True\n else:\n print('/*\\\\ Ignored database migrations... /*\\\\')\n return False\n else:\n return True\n\n\ndef run_upgrade():\n upgrade()\n utils.set_config('ctf_version', __version__)\n\n\ndef create_app(config='CTFd.config.Config'):\n app = CTFdFlask(__name__)\n with app.app_context():\n app.config.from_object(config)\n\n theme_loader = ThemeLoader(os.path.join(app.root_path, 'themes'), followlinks=True)\n app.jinja_loader = theme_loader\n\n from CTFd.models import db, Teams, Solves, Challenges, Fails, Flags, Tags, Files, Tracking\n\n url = create_database()\n\n # This allows any changes to the SQLALCHEMY_DATABASE_URI to get pushed back in\n # This is mostly so we can force MySQL's charset\n app.config['SQLALCHEMY_DATABASE_URI'] = str(url)\n\n # Register database\n db.init_app(app)\n\n # Register Flask-Migrate\n migrations.init_app(app, db)\n\n # Alembic sqlite support is lacking so we should just create_all anyway\n if url.drivername.startswith('sqlite'):\n db.create_all()\n stamp()\n else:\n # This creates tables instead of db.create_all()\n # Allows migrations to happen properly\n upgrade()\n\n from CTFd.models import ma\n\n ma.init_app(app)\n\n app.db = db\n app.VERSION = __version__\n\n from CTFd.cache import cache\n\n cache.init_app(app)\n app.cache = cache\n\n # If you have multiple workers you must have a shared cache\n socketio.init_app(\n app,\n async_mode=app.config.get('SOCKETIO_ASYNC_MODE'),\n message_queue=app.config.get('CACHE_REDIS_URL')\n )\n\n if app.config.get('REVERSE_PROXY'):\n app.wsgi_app = ProxyFix(app.wsgi_app)\n\n version = utils.get_config('ctf_version')\n\n # Upgrading from an older version of CTFd\n if version and (StrictVersion(version) < StrictVersion(__version__)):\n if confirm_upgrade():\n run_upgrade()\n else:\n exit()\n\n if not version:\n utils.set_config('ctf_version', __version__)\n\n if not utils.get_config('ctf_theme'):\n utils.set_config('ctf_theme', 'core')\n\n update_check(force=True)\n\n init_request_processors(app)\n init_template_filters(app)\n init_template_globals(app)\n\n # Importing here allows tests to use sensible names (e.g. 
api instead of api_bp)\n from CTFd.views import views\n from CTFd.teams import teams\n from CTFd.users import users\n from CTFd.challenges import challenges\n from CTFd.scoreboard import scoreboard\n from CTFd.auth import auth\n from CTFd.admin import admin\n from CTFd.api import api\n from CTFd.events import events\n from CTFd.errors import page_not_found, forbidden, general_error, gateway_error\n\n app.register_blueprint(views)\n app.register_blueprint(teams)\n app.register_blueprint(users)\n app.register_blueprint(challenges)\n app.register_blueprint(scoreboard)\n app.register_blueprint(auth)\n app.register_blueprint(api)\n app.register_blueprint(events)\n\n app.register_blueprint(admin)\n\n app.register_error_handler(404, page_not_found)\n app.register_error_handler(403, forbidden)\n app.register_error_handler(500, general_error)\n app.register_error_handler(502, gateway_error)\n\n init_logs(app)\n init_plugins(app)\n\n return app\n", "path": "CTFd/__init__.py"}], "after_files": [{"content": "import sys\nimport os\n\nfrom distutils.version import StrictVersion\nfrom flask import Flask, Request\nfrom werkzeug.utils import cached_property\nfrom werkzeug.contrib.fixers import ProxyFix\nfrom jinja2 import FileSystemLoader\nfrom jinja2.sandbox import SandboxedEnvironment\nfrom six.moves import input\n\nfrom CTFd import utils\nfrom CTFd.utils.migrations import migrations, migrate, upgrade, stamp, create_database\nfrom CTFd.utils.sessions import CachingSessionInterface\nfrom CTFd.utils.updates import update_check\nfrom CTFd.utils.initialization import init_request_processors, init_template_filters, init_template_globals, init_logs\nfrom CTFd.utils.events import socketio\nfrom CTFd.plugins import init_plugins\n\n# Hack to support Unicode in Python 2 properly\nif sys.version_info[0] < 3:\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n\n__version__ = '2.0.4'\n\n\nclass CTFdRequest(Request):\n @cached_property\n def path(self):\n \"\"\"\n Hijack the original Flask request path because it does not account for subdirectory deployments in an intuitive\n manner. We append script_root so that the path always points to the full path as seen in the browser.\n e.g. 
/subdirectory/path/route vs /path/route\n\n :return: string\n \"\"\"\n return self.script_root + super(CTFdRequest, self).path\n\n\nclass CTFdFlask(Flask):\n def __init__(self, *args, **kwargs):\n \"\"\"Overriden Jinja constructor setting a custom jinja_environment\"\"\"\n self.jinja_environment = SandboxedBaseEnvironment\n self.session_interface = CachingSessionInterface(key_prefix='session')\n self.request_class = CTFdRequest\n Flask.__init__(self, *args, **kwargs)\n\n def create_jinja_environment(self):\n \"\"\"Overridden jinja environment constructor\"\"\"\n return super(CTFdFlask, self).create_jinja_environment()\n\n\nclass SandboxedBaseEnvironment(SandboxedEnvironment):\n \"\"\"SandboxEnvironment that mimics the Flask BaseEnvironment\"\"\"\n def __init__(self, app, **options):\n if 'loader' not in options:\n options['loader'] = app.create_global_jinja_loader()\n # Disable cache entirely so that themes can be switched (#662)\n # If the cache is enabled, switching themes will cause odd rendering errors\n SandboxedEnvironment.__init__(self, cache_size=0, **options)\n self.app = app\n\n\nclass ThemeLoader(FileSystemLoader):\n \"\"\"Custom FileSystemLoader that switches themes based on the configuration value\"\"\"\n def __init__(self, searchpath, encoding='utf-8', followlinks=False):\n super(ThemeLoader, self).__init__(searchpath, encoding, followlinks)\n self.overriden_templates = {}\n\n def get_source(self, environment, template):\n # Check if the template has been overriden\n if template in self.overriden_templates:\n return self.overriden_templates[template], template, True\n\n # Check if the template requested is for the admin panel\n if template.startswith('admin/'):\n template = template[6:] # Strip out admin/\n template = \"/\".join(['admin', 'templates', template])\n return super(ThemeLoader, self).get_source(environment, template)\n\n # Load regular theme data\n theme = utils.get_config('ctf_theme')\n template = \"/\".join([theme, 'templates', template])\n return super(ThemeLoader, self).get_source(environment, template)\n\n\ndef confirm_upgrade():\n if sys.stdin.isatty():\n print(\"/*\\\\ CTFd has updated and must update the database! /*\\\\\")\n print(\"/*\\\\ Please backup your database before proceeding! /*\\\\\")\n print(\"/*\\\\ CTFd maintainers are not responsible for any data loss! /*\\\\\")\n if input('Run database migrations (Y/N)').lower().strip() == 'y':\n return True\n else:\n print('/*\\\\ Ignored database migrations... 
/*\\\\')\n return False\n else:\n return True\n\n\ndef run_upgrade():\n upgrade()\n utils.set_config('ctf_version', __version__)\n\n\ndef create_app(config='CTFd.config.Config'):\n app = CTFdFlask(__name__)\n with app.app_context():\n app.config.from_object(config)\n\n theme_loader = ThemeLoader(os.path.join(app.root_path, 'themes'), followlinks=True)\n app.jinja_loader = theme_loader\n\n from CTFd.models import db, Teams, Solves, Challenges, Fails, Flags, Tags, Files, Tracking\n\n url = create_database()\n\n # This allows any changes to the SQLALCHEMY_DATABASE_URI to get pushed back in\n # This is mostly so we can force MySQL's charset\n app.config['SQLALCHEMY_DATABASE_URI'] = str(url)\n\n # Register database\n db.init_app(app)\n\n # Register Flask-Migrate\n migrations.init_app(app, db)\n\n # Alembic sqlite support is lacking so we should just create_all anyway\n if url.drivername.startswith('sqlite'):\n db.create_all()\n stamp()\n else:\n # This creates tables instead of db.create_all()\n # Allows migrations to happen properly\n upgrade()\n\n from CTFd.models import ma\n\n ma.init_app(app)\n\n app.db = db\n app.VERSION = __version__\n\n from CTFd.cache import cache\n\n cache.init_app(app)\n app.cache = cache\n\n # If you have multiple workers you must have a shared cache\n socketio.init_app(\n app,\n async_mode=app.config.get('SOCKETIO_ASYNC_MODE'),\n message_queue=app.config.get('CACHE_REDIS_URL')\n )\n\n if app.config.get('REVERSE_PROXY'):\n app.wsgi_app = ProxyFix(app.wsgi_app)\n\n version = utils.get_config('ctf_version')\n\n # Upgrading from an older version of CTFd\n if version and (StrictVersion(version) < StrictVersion(__version__)):\n if confirm_upgrade():\n run_upgrade()\n else:\n exit()\n\n if not version:\n utils.set_config('ctf_version', __version__)\n\n if not utils.get_config('ctf_theme'):\n utils.set_config('ctf_theme', 'core')\n\n update_check(force=True)\n\n init_request_processors(app)\n init_template_filters(app)\n init_template_globals(app)\n\n # Importing here allows tests to use sensible names (e.g. api instead of api_bp)\n from CTFd.views import views\n from CTFd.teams import teams\n from CTFd.users import users\n from CTFd.challenges import challenges\n from CTFd.scoreboard import scoreboard\n from CTFd.auth import auth\n from CTFd.admin import admin\n from CTFd.api import api\n from CTFd.events import events\n from CTFd.errors import page_not_found, forbidden, general_error, gateway_error\n\n app.register_blueprint(views)\n app.register_blueprint(teams)\n app.register_blueprint(users)\n app.register_blueprint(challenges)\n app.register_blueprint(scoreboard)\n app.register_blueprint(auth)\n app.register_blueprint(api)\n app.register_blueprint(events)\n\n app.register_blueprint(admin)\n\n app.register_error_handler(404, page_not_found)\n app.register_error_handler(403, forbidden)\n app.register_error_handler(500, general_error)\n app.register_error_handler(502, gateway_error)\n\n init_logs(app)\n init_plugins(app)\n\n return app\n", "path": "CTFd/__init__.py"}]} | 2,486 | 98 |
gh_patches_debug_16110 | rasdani/github-patches | git_diff | mampfes__hacs_waste_collection_schedule-339 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[bug] [recycleapp_be] UnboundLocalError: local variable 'streetId' referenced before assignment
Hello,
I have a problem with the recycleapp_be source.
I get the following error in the log:
```
Logger: waste_collection_schedule.scraper
Source: custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py:143
Integration: waste_collection_schedule ([documentation](https://github.com/mampfes/hacs_waste_collection_schedule#readme))
First occurred: 15:24:43 (1 occurrences)
Last logged: 15:24:43
fetch failed for source Recycle!: Traceback (most recent call last): File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py", line 141, in fetch entries = self._source.fetch() File "/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py", line 66, in fetch if streetId is None: UnboundLocalError: local variable 'streetId' referenced before assignment
```
--- END ISSUE ---
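The traceback comes down to a name that is only bound inside a conditional branch of a loop: when no street name matches exactly, the later `is None` check touches an unbound local. A minimal reproduction of the failure mode and the usual guard, using illustrative data rather than a real Recycle! API response:

```python
# Illustrative data only; the real items come from the /streets endpoint.
items = [{"name": "Other street", "id": 41}, {"name": "Bazellaan", "id": 42}]

def pick_street_id(items, wanted):
    streetId = None  # bind the name up front so the check below cannot raise
    for item in items:
        if item["name"] == wanted:
            streetId = item["id"]
    if streetId is None:  # without the binding above this line raises UnboundLocalError
        streetId = items[0]["id"]
    return streetId

print(pick_street_id(items, "Waversebaan"))  # falls back to 41 instead of crashing
```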
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py`
Content:
```
1 import logging
2 from datetime import datetime, timedelta
3
4 import requests
5 from waste_collection_schedule import Collection # type: ignore[attr-defined]
6
7 TITLE = "Recycle!"
8 DESCRIPTION = "Source for RecycleApp.be"
9 URL = "https://www.recycleapp.be"
10 TEST_CASES = {
11 "1140 Evere, Bazellaan 1": {
12 "postcode": 1140,
13 "street": "Bazellaan",
14 "house_number": 1,
15 },
16 "3001, Waversebaan 276 with events": {
17 "postcode": 3001,
18 "street": "Waversebaan",
19 "house_number": 276,
20 },
21 "3001, Waversebaan 276 without events": {
22 "postcode": 3001,
23 "street": "Waversebaan",
24 "house_number": 276,
25 "add_events": False,
26 },
27 }
28
29 _LOGGER = logging.getLogger(__name__)
30
31
32 class Source:
33 def __init__(self, postcode, street, house_number, add_events=True):
34 self._postcode = postcode
35 self._street = street
36 self._house_number = house_number
37 self._add_events = add_events
38
39 def fetch(self):
40 url = "https://api.recycleapp.be/api/app/v1"
41 headers = {
42 "x-secret": "Crgja3EGWe8jdapyr4EEoMBgZACYYjRRcRpaMQrLDW9HJBvmgkfGQyYqLgeXPavAGvnJqkV87PBB2b8zx43q46sUgzqio4yRZbABhtKeagkVKypTEDjKfPgGycjLyJTtLHYpzwJgp4YmmCuJZN9ZmJY8CGEoFs8MKfdJpU9RjkEVfngmmk2LYD4QzFegLNKUbcCeAdEW",
43 "x-consumer": "recycleapp.be",
44 "User-Agent": "",
45 "Authorization": "",
46 }
47 r = requests.get(f"{url}/access-token", headers=headers)
48 headers["Authorization"] = r.json()["accessToken"]
49
50 params = {"q": self._postcode}
51 r = requests.get(f"{url}/zipcodes", params=params, headers=headers)
52 if r.status_code != 200:
53 _LOGGER.error("Get zip code failed")
54 return []
55 zipcodeId = r.json()["items"][0]["id"]
56
57 params = {"q": self._street, "zipcodes": zipcodeId}
58 r = requests.get(f"{url}/streets", params=params, headers=headers)
59 if r.status_code != 200:
60 _LOGGER.error("Get street id failed")
61 return []
62
63 for item in r.json()["items"]:
64 if item["name"] == self._street:
65 streetId = item["id"]
66 if streetId is None:
67 streetId = r.json()["items"][0]["id"]
68
69 now = datetime.now()
70 fromDate = now.strftime("%Y-%m-%d")
71 untilDate = (now + timedelta(days=365)).strftime("%Y-%m-%d")
72 params = {
73 "zipcodeId": zipcodeId,
74 "streetId": streetId,
75 "houseNumber": self._house_number,
76 "fromDate": fromDate,
77 "untilDate": untilDate,
78 # "size":100,
79 }
80 r = requests.get(f"{url}/collections", params=params, headers=headers)
81 if r.status_code != 200:
82 _LOGGER.error("Get data failed")
83 return []
84
85 entries = []
86 for item in r.json()["items"]:
87 if "exception" in item and "replacedBy" in item["exception"]:
88 continue
89
90 date = datetime.strptime(item["timestamp"], "%Y-%m-%dT%H:%M:%S.000Z").date()
91 if item["type"] == "collection":
92 entries.append(Collection(date, item["fraction"]["name"]["en"]))
93 elif item["type"] == "event" and self._add_events:
94 entries.append(Collection(date, item["event"]["title"]["en"]))
95
96 return entries
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py
--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py
+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py
@@ -24,6 +24,12 @@
"house_number": 276,
"add_events": False,
},
+ "1400, Rue de namur 1 with events": {
+ "postcode": 1400,
+ "street": "Rue de namur",
+ "house_number": 1,
+ "add_events": True,
+ },
}
_LOGGER = logging.getLogger(__name__)
@@ -60,6 +66,7 @@
_LOGGER.error("Get street id failed")
return []
+ streetId = None
for item in r.json()["items"]:
if item["name"] == self._street:
streetId = item["id"]
| {"golden_diff": "diff --git a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\n--- a/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\n+++ b/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\n@@ -24,6 +24,12 @@\n \"house_number\": 276,\n \"add_events\": False,\n },\n+ \"1400, Rue de namur 1 with events\": {\n+ \"postcode\": 1400,\n+ \"street\": \"Rue de namur\",\n+ \"house_number\": 1,\n+ \"add_events\": True,\n+ },\n }\n \n _LOGGER = logging.getLogger(__name__)\n@@ -60,6 +66,7 @@\n _LOGGER.error(\"Get street id failed\")\n return []\n \n+ streetId = None\n for item in r.json()[\"items\"]:\n if item[\"name\"] == self._street:\n streetId = item[\"id\"]\n", "issue": "[bug] [recycleapp_be] UnboundLocalError: local variable 'streetId' referenced before assignment\nHello, \r\n\r\nI have a problem with the recycleapp_be source.\r\n\r\nI have the error into the log : \r\n\r\n```\r\nLogger: waste_collection_schedule.scraper\r\nSource: custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py:143\r\nIntegration: waste_collection_schedule ([documentation](https://github.com/mampfes/hacs_waste_collection_schedule#readme))\r\nFirst occurred: 15:24:43 (1 occurrences)\r\nLast logged: 15:24:43\r\n\r\nfetch failed for source Recycle!: Traceback (most recent call last): File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/scraper.py\", line 141, in fetch entries = self._source.fetch() File \"/config/custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py\", line 66, in fetch if streetId is None: UnboundLocalError: local variable 'streetId' referenced before assignment\r\n` ``\r\n\n", "before_files": [{"content": "import logging\nfrom datetime import datetime, timedelta\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Recycle!\"\nDESCRIPTION = \"Source for RecycleApp.be\"\nURL = \"https://www.recycleapp.be\"\nTEST_CASES = {\n \"1140 Evere, Bazellaan 1\": {\n \"postcode\": 1140,\n \"street\": \"Bazellaan\",\n \"house_number\": 1,\n },\n \"3001, Waversebaan 276 with events\": {\n \"postcode\": 3001,\n \"street\": \"Waversebaan\",\n \"house_number\": 276,\n },\n \"3001, Waversebaan 276 without events\": {\n \"postcode\": 3001,\n \"street\": \"Waversebaan\",\n \"house_number\": 276,\n \"add_events\": False,\n },\n}\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, postcode, street, house_number, add_events=True):\n self._postcode = postcode\n self._street = street\n self._house_number = house_number\n self._add_events = add_events\n\n def fetch(self):\n url = \"https://api.recycleapp.be/api/app/v1\"\n headers = {\n \"x-secret\": \"Crgja3EGWe8jdapyr4EEoMBgZACYYjRRcRpaMQrLDW9HJBvmgkfGQyYqLgeXPavAGvnJqkV87PBB2b8zx43q46sUgzqio4yRZbABhtKeagkVKypTEDjKfPgGycjLyJTtLHYpzwJgp4YmmCuJZN9ZmJY8CGEoFs8MKfdJpU9RjkEVfngmmk2LYD4QzFegLNKUbcCeAdEW\",\n \"x-consumer\": \"recycleapp.be\",\n \"User-Agent\": \"\",\n \"Authorization\": \"\",\n }\n r = requests.get(f\"{url}/access-token\", headers=headers)\n headers[\"Authorization\"] = r.json()[\"accessToken\"]\n\n params = {\"q\": self._postcode}\n r = requests.get(f\"{url}/zipcodes\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get zip code 
failed\")\n return []\n zipcodeId = r.json()[\"items\"][0][\"id\"]\n\n params = {\"q\": self._street, \"zipcodes\": zipcodeId}\n r = requests.get(f\"{url}/streets\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get street id failed\")\n return []\n\n for item in r.json()[\"items\"]:\n if item[\"name\"] == self._street:\n streetId = item[\"id\"]\n if streetId is None:\n streetId = r.json()[\"items\"][0][\"id\"]\n\n now = datetime.now()\n fromDate = now.strftime(\"%Y-%m-%d\")\n untilDate = (now + timedelta(days=365)).strftime(\"%Y-%m-%d\")\n params = {\n \"zipcodeId\": zipcodeId,\n \"streetId\": streetId,\n \"houseNumber\": self._house_number,\n \"fromDate\": fromDate,\n \"untilDate\": untilDate,\n # \"size\":100,\n }\n r = requests.get(f\"{url}/collections\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get data failed\")\n return []\n\n entries = []\n for item in r.json()[\"items\"]:\n if \"exception\" in item and \"replacedBy\" in item[\"exception\"]:\n continue\n\n date = datetime.strptime(item[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S.000Z\").date()\n if item[\"type\"] == \"collection\":\n entries.append(Collection(date, item[\"fraction\"][\"name\"][\"en\"]))\n elif item[\"type\"] == \"event\" and self._add_events:\n entries.append(Collection(date, item[\"event\"][\"title\"][\"en\"]))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py"}], "after_files": [{"content": "import logging\nfrom datetime import datetime, timedelta\n\nimport requests\nfrom waste_collection_schedule import Collection # type: ignore[attr-defined]\n\nTITLE = \"Recycle!\"\nDESCRIPTION = \"Source for RecycleApp.be\"\nURL = \"https://www.recycleapp.be\"\nTEST_CASES = {\n \"1140 Evere, Bazellaan 1\": {\n \"postcode\": 1140,\n \"street\": \"Bazellaan\",\n \"house_number\": 1,\n },\n \"3001, Waversebaan 276 with events\": {\n \"postcode\": 3001,\n \"street\": \"Waversebaan\",\n \"house_number\": 276,\n },\n \"3001, Waversebaan 276 without events\": {\n \"postcode\": 3001,\n \"street\": \"Waversebaan\",\n \"house_number\": 276,\n \"add_events\": False,\n },\n \"1400, Rue de namur 1 with events\": {\n \"postcode\": 1400,\n \"street\": \"Rue de namur\",\n \"house_number\": 1,\n \"add_events\": True,\n },\n}\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass Source:\n def __init__(self, postcode, street, house_number, add_events=True):\n self._postcode = postcode\n self._street = street\n self._house_number = house_number\n self._add_events = add_events\n\n def fetch(self):\n url = \"https://api.recycleapp.be/api/app/v1\"\n headers = {\n \"x-secret\": \"Crgja3EGWe8jdapyr4EEoMBgZACYYjRRcRpaMQrLDW9HJBvmgkfGQyYqLgeXPavAGvnJqkV87PBB2b8zx43q46sUgzqio4yRZbABhtKeagkVKypTEDjKfPgGycjLyJTtLHYpzwJgp4YmmCuJZN9ZmJY8CGEoFs8MKfdJpU9RjkEVfngmmk2LYD4QzFegLNKUbcCeAdEW\",\n \"x-consumer\": \"recycleapp.be\",\n \"User-Agent\": \"\",\n \"Authorization\": \"\",\n }\n r = requests.get(f\"{url}/access-token\", headers=headers)\n headers[\"Authorization\"] = r.json()[\"accessToken\"]\n\n params = {\"q\": self._postcode}\n r = requests.get(f\"{url}/zipcodes\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get zip code failed\")\n return []\n zipcodeId = r.json()[\"items\"][0][\"id\"]\n\n params = {\"q\": self._street, \"zipcodes\": zipcodeId}\n r = requests.get(f\"{url}/streets\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get street id failed\")\n return 
[]\n\n streetId = None\n for item in r.json()[\"items\"]:\n if item[\"name\"] == self._street:\n streetId = item[\"id\"]\n if streetId is None:\n streetId = r.json()[\"items\"][0][\"id\"]\n\n now = datetime.now()\n fromDate = now.strftime(\"%Y-%m-%d\")\n untilDate = (now + timedelta(days=365)).strftime(\"%Y-%m-%d\")\n params = {\n \"zipcodeId\": zipcodeId,\n \"streetId\": streetId,\n \"houseNumber\": self._house_number,\n \"fromDate\": fromDate,\n \"untilDate\": untilDate,\n # \"size\":100,\n }\n r = requests.get(f\"{url}/collections\", params=params, headers=headers)\n if r.status_code != 200:\n _LOGGER.error(\"Get data failed\")\n return []\n\n entries = []\n for item in r.json()[\"items\"]:\n if \"exception\" in item and \"replacedBy\" in item[\"exception\"]:\n continue\n\n date = datetime.strptime(item[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S.000Z\").date()\n if item[\"type\"] == \"collection\":\n entries.append(Collection(date, item[\"fraction\"][\"name\"][\"en\"]))\n elif item[\"type\"] == \"event\" and self._add_events:\n entries.append(Collection(date, item[\"event\"][\"title\"][\"en\"]))\n\n return entries\n", "path": "custom_components/waste_collection_schedule/waste_collection_schedule/source/recycleapp_be.py"}]} | 1,627 | 240 |
gh_patches_debug_58681 | rasdani/github-patches | git_diff | lightly-ai__lightly-1009 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Loss stuck
Hi, I am trying to run the tutorial posted here
https://docs.lightly.ai/self-supervised-learning/tutorials/package/tutorial_moco_memory_bank.html
But my loss is stuck at 8.32 after 100 epochs.
Python 3.9
pytorch-lightning 1.8.1
lightly 1.2.38
Any suggestions on how I should troubleshoot this?
Thanks in advance!
--- END ISSUE ---
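One failure mode consistent with a flat loss in a memory-bank setup is an un-normalized bank: `torch.nn.functional.normalize` returns a new tensor rather than modifying its argument in place, so discarding the return value leaves the bank unchanged. A small self-contained check of that behaviour, separate from the tutorial's training loop:

```python
import torch

bank = torch.randn(4, 8)
torch.nn.functional.normalize(bank, dim=0)         # return value discarded: bank is unchanged
bank = torch.nn.functional.normalize(bank, dim=0)  # assigned back: columns now have unit norm
print(bank.norm(dim=0))                            # ~1.0 for every column
```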
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lightly/loss/memory_bank.py`
Content:
```
1 """ Memory Bank Wrapper """
2
3 # Copyright (c) 2020. Lightly AG and its affiliates.
4 # All Rights Reserved
5
6 import torch
7 import functools
8
9 class MemoryBankModule(torch.nn.Module):
10 """Memory bank implementation
11
12 This is a parent class to all loss functions implemented by the lightly
13 Python package. This way, any loss can be used with a memory bank if
14 desired.
15
16 Attributes:
17 size:
18 Number of keys the memory bank can store. If set to 0,
19 memory bank is not used.
20
21 Examples:
22 >>> class MyLossFunction(MemoryBankModule):
23 >>>
24 >>> def __init__(self, memory_bank_size: int = 2 ** 16):
25 >>> super(MyLossFunction, self).__init__(memory_bank_size)
26 >>>
27 >>> def forward(self, output: torch.Tensor,
28 >>> labels: torch.Tensor = None):
29 >>>
30 >>> output, negatives = super(
31 >>> MyLossFunction, self).forward(output)
32 >>>
33 >>> if negatives is not None:
34 >>> # evaluate loss with negative samples
35 >>> else:
36 >>> # evaluate loss without negative samples
37
38 """
39
40 def __init__(self, size: int = 2 ** 16):
41
42 super(MemoryBankModule, self).__init__()
43
44 if size < 0:
45 msg = f'Illegal memory bank size {size}, must be non-negative.'
46 raise ValueError(msg)
47
48 self.size = size
49 self.register_buffer("bank", tensor=torch.empty(0, dtype=torch.float), persistent=False)
50 self.register_buffer("bank_ptr", tensor=torch.empty(0, dtype=torch.long), persistent=False)
51
52 @torch.no_grad()
53 def _init_memory_bank(self, dim: int):
54 """Initialize the memory bank if it's empty
55
56 Args:
57 dim:
58 The dimension of the which are stored in the bank.
59
60 """
61 # create memory bank
62 # we could use register buffers like in the moco repo
63 # https://github.com/facebookresearch/moco but we don't
64 # want to pollute our checkpoints
65 self.bank = torch.randn(dim, self.size).type_as(self.bank)
66 torch.nn.functional.normalize(self.bank, dim=0)
67 self.bank_ptr = torch.zeros(1).type_as(self.bank_ptr)
68
69 @torch.no_grad()
70 def _dequeue_and_enqueue(self, batch: torch.Tensor):
71 """Dequeue the oldest batch and add the latest one
72
73 Args:
74 batch:
75 The latest batch of keys to add to the memory bank.
76
77 """
78 batch_size = batch.shape[0]
79 ptr = int(self.bank_ptr)
80
81 if ptr + batch_size >= self.size:
82 self.bank[:, ptr:] = batch[:self.size - ptr].T.detach()
83 self.bank_ptr[0] = 0
84 else:
85 self.bank[:, ptr:ptr + batch_size] = batch.T.detach()
86 self.bank_ptr[0] = ptr + batch_size
87
88 def forward(self,
89 output: torch.Tensor,
90 labels: torch.Tensor = None,
91 update: bool = False):
92 """Query memory bank for additional negative samples
93
94 Args:
95 output:
96 The output of the model.
97 labels:
98 Should always be None, will be ignored.
99
100 Returns:
101 The output if the memory bank is of size 0, otherwise the output
102 and the entries from the memory bank.
103
104 """
105
106 # no memory bank, return the output
107 if self.size == 0:
108 return output, None
109
110 _, dim = output.shape
111
112 # initialize the memory bank if it is not already done
113 if self.bank.nelement() == 0:
114 self._init_memory_bank(dim)
115
116 # query and update memory bank
117 bank = self.bank.clone().detach()
118
119 # only update memory bank if we later do backward pass (gradient)
120 if update:
121 self._dequeue_and_enqueue(output)
122
123 return output, bank
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lightly/loss/memory_bank.py b/lightly/loss/memory_bank.py
--- a/lightly/loss/memory_bank.py
+++ b/lightly/loss/memory_bank.py
@@ -63,7 +63,7 @@
# https://github.com/facebookresearch/moco but we don't
# want to pollute our checkpoints
self.bank = torch.randn(dim, self.size).type_as(self.bank)
- torch.nn.functional.normalize(self.bank, dim=0)
+ self.bank = torch.nn.functional.normalize(self.bank, dim=0)
self.bank_ptr = torch.zeros(1).type_as(self.bank_ptr)
@torch.no_grad()
| {"golden_diff": "diff --git a/lightly/loss/memory_bank.py b/lightly/loss/memory_bank.py\n--- a/lightly/loss/memory_bank.py\n+++ b/lightly/loss/memory_bank.py\n@@ -63,7 +63,7 @@\n # https://github.com/facebookresearch/moco but we don't\n # want to pollute our checkpoints\n self.bank = torch.randn(dim, self.size).type_as(self.bank)\n- torch.nn.functional.normalize(self.bank, dim=0)\n+ self.bank = torch.nn.functional.normalize(self.bank, dim=0)\n self.bank_ptr = torch.zeros(1).type_as(self.bank_ptr)\n \n @torch.no_grad()\n", "issue": "Loss stuck\nHi, I am trying to run the tutorial posted here \r\nhttps://docs.lightly.ai/self-supervised-learning/tutorials/package/tutorial_moco_memory_bank.html\r\nBut my loss is stuck at 8.32 after 100 epochs\r\npython 3.9\r\npytorch-lightning 1.8.1 \r\nlightly 1.2.38\r\n\r\nAny suggestions on how I should troubleshoot this?\r\nThanks in advance!\n", "before_files": [{"content": "\"\"\" Memory Bank Wrapper \"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport torch\nimport functools\n\nclass MemoryBankModule(torch.nn.Module):\n \"\"\"Memory bank implementation\n\n This is a parent class to all loss functions implemented by the lightly\n Python package. This way, any loss can be used with a memory bank if \n desired.\n\n Attributes:\n size:\n Number of keys the memory bank can store. If set to 0,\n memory bank is not used.\n\n Examples:\n >>> class MyLossFunction(MemoryBankModule):\n >>>\n >>> def __init__(self, memory_bank_size: int = 2 ** 16):\n >>> super(MyLossFunction, self).__init__(memory_bank_size)\n >>>\n >>> def forward(self, output: torch.Tensor,\n >>> labels: torch.Tensor = None):\n >>>\n >>> output, negatives = super(\n >>> MyLossFunction, self).forward(output)\n >>>\n >>> if negatives is not None:\n >>> # evaluate loss with negative samples\n >>> else:\n >>> # evaluate loss without negative samples\n\n \"\"\"\n\n def __init__(self, size: int = 2 ** 16):\n\n super(MemoryBankModule, self).__init__()\n\n if size < 0:\n msg = f'Illegal memory bank size {size}, must be non-negative.'\n raise ValueError(msg)\n\n self.size = size\n self.register_buffer(\"bank\", tensor=torch.empty(0, dtype=torch.float), persistent=False)\n self.register_buffer(\"bank_ptr\", tensor=torch.empty(0, dtype=torch.long), persistent=False)\n\n @torch.no_grad()\n def _init_memory_bank(self, dim: int):\n \"\"\"Initialize the memory bank if it's empty\n\n Args:\n dim:\n The dimension of the which are stored in the bank.\n\n \"\"\"\n # create memory bank\n # we could use register buffers like in the moco repo\n # https://github.com/facebookresearch/moco but we don't\n # want to pollute our checkpoints\n self.bank = torch.randn(dim, self.size).type_as(self.bank)\n torch.nn.functional.normalize(self.bank, dim=0)\n self.bank_ptr = torch.zeros(1).type_as(self.bank_ptr)\n\n @torch.no_grad()\n def _dequeue_and_enqueue(self, batch: torch.Tensor):\n \"\"\"Dequeue the oldest batch and add the latest one\n\n Args:\n batch:\n The latest batch of keys to add to the memory bank.\n\n \"\"\"\n batch_size = batch.shape[0]\n ptr = int(self.bank_ptr)\n\n if ptr + batch_size >= self.size:\n self.bank[:, ptr:] = batch[:self.size - ptr].T.detach()\n self.bank_ptr[0] = 0\n else:\n self.bank[:, ptr:ptr + batch_size] = batch.T.detach()\n self.bank_ptr[0] = ptr + batch_size\n\n def forward(self,\n output: torch.Tensor,\n labels: torch.Tensor = None,\n update: bool = False):\n \"\"\"Query memory bank for additional negative samples\n\n Args:\n output:\n The output 
of the model.\n labels:\n Should always be None, will be ignored.\n\n Returns:\n The output if the memory bank is of size 0, otherwise the output\n and the entries from the memory bank.\n\n \"\"\"\n\n # no memory bank, return the output\n if self.size == 0:\n return output, None\n\n _, dim = output.shape\n\n # initialize the memory bank if it is not already done\n if self.bank.nelement() == 0:\n self._init_memory_bank(dim)\n\n # query and update memory bank\n bank = self.bank.clone().detach()\n\n # only update memory bank if we later do backward pass (gradient)\n if update:\n self._dequeue_and_enqueue(output)\n\n return output, bank\n", "path": "lightly/loss/memory_bank.py"}], "after_files": [{"content": "\"\"\" Memory Bank Wrapper \"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport torch\nimport functools\n\nclass MemoryBankModule(torch.nn.Module):\n \"\"\"Memory bank implementation\n\n This is a parent class to all loss functions implemented by the lightly\n Python package. This way, any loss can be used with a memory bank if \n desired.\n\n Attributes:\n size:\n Number of keys the memory bank can store. If set to 0,\n memory bank is not used.\n\n Examples:\n >>> class MyLossFunction(MemoryBankModule):\n >>>\n >>> def __init__(self, memory_bank_size: int = 2 ** 16):\n >>> super(MyLossFunction, self).__init__(memory_bank_size)\n >>>\n >>> def forward(self, output: torch.Tensor,\n >>> labels: torch.Tensor = None):\n >>>\n >>> output, negatives = super(\n >>> MyLossFunction, self).forward(output)\n >>>\n >>> if negatives is not None:\n >>> # evaluate loss with negative samples\n >>> else:\n >>> # evaluate loss without negative samples\n\n \"\"\"\n\n def __init__(self, size: int = 2 ** 16):\n\n super(MemoryBankModule, self).__init__()\n\n if size < 0:\n msg = f'Illegal memory bank size {size}, must be non-negative.'\n raise ValueError(msg)\n\n self.size = size\n self.register_buffer(\"bank\", tensor=torch.empty(0, dtype=torch.float), persistent=False)\n self.register_buffer(\"bank_ptr\", tensor=torch.empty(0, dtype=torch.long), persistent=False)\n\n @torch.no_grad()\n def _init_memory_bank(self, dim: int):\n \"\"\"Initialize the memory bank if it's empty\n\n Args:\n dim:\n The dimension of the which are stored in the bank.\n\n \"\"\"\n # create memory bank\n # we could use register buffers like in the moco repo\n # https://github.com/facebookresearch/moco but we don't\n # want to pollute our checkpoints\n self.bank = torch.randn(dim, self.size).type_as(self.bank)\n self.bank = torch.nn.functional.normalize(self.bank, dim=0)\n self.bank_ptr = torch.zeros(1).type_as(self.bank_ptr)\n\n @torch.no_grad()\n def _dequeue_and_enqueue(self, batch: torch.Tensor):\n \"\"\"Dequeue the oldest batch and add the latest one\n\n Args:\n batch:\n The latest batch of keys to add to the memory bank.\n\n \"\"\"\n batch_size = batch.shape[0]\n ptr = int(self.bank_ptr)\n\n if ptr + batch_size >= self.size:\n self.bank[:, ptr:] = batch[:self.size - ptr].T.detach()\n self.bank_ptr[0] = 0\n else:\n self.bank[:, ptr:ptr + batch_size] = batch.T.detach()\n self.bank_ptr[0] = ptr + batch_size\n\n def forward(self,\n output: torch.Tensor,\n labels: torch.Tensor = None,\n update: bool = False):\n \"\"\"Query memory bank for additional negative samples\n\n Args:\n output:\n The output of the model.\n labels:\n Should always be None, will be ignored.\n\n Returns:\n The output if the memory bank is of size 0, otherwise the output\n and the entries from the memory bank.\n\n 
\"\"\"\n\n # no memory bank, return the output\n if self.size == 0:\n return output, None\n\n _, dim = output.shape\n\n # initialize the memory bank if it is not already done\n if self.bank.nelement() == 0:\n self._init_memory_bank(dim)\n\n # query and update memory bank\n bank = self.bank.clone().detach()\n\n # only update memory bank if we later do backward pass (gradient)\n if update:\n self._dequeue_and_enqueue(output)\n\n return output, bank\n", "path": "lightly/loss/memory_bank.py"}]} | 1,491 | 144 |
gh_patches_debug_18007 | rasdani/github-patches | git_diff | chainer__chainer-277 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
NegativeSampling and WalkerAlias do not implement to_cpu()
`NegativeSampling` implements only `to_gpu()`, so `self.sampler` is never moved back to the CPU even when `to_cpu()` is called. `WalkerAlias` does not have a `to_cpu()` method either.
Currently this causes a problem with the [word2vec example](https://github.com/pfnet/chainer/pull/270) when loading a GPU-trained model based on negative sampling that [has been transferred](https://github.com/pfnet/chainer/blob/0bab97c584d462e4478f360c26ce427bfe81eb0f/examples/word2vec/train_word2vec.py#L183) with `model.to_cpu()` before being saved with pickle.
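
A minimal sketch of the failure mode (the sizes and counts below are made up for illustration, and a working, already-initialised CUDA setup is assumed):

```python
from chainer.functions.negative_sampling import NegativeSampling

# Toy parameters, purely for illustration.
ns = NegativeSampling(in_size=10, counts=[5, 3, 2], sample_size=2)

ns.to_gpu()   # moves W/gW and the sampler's tables to the GPU
ns.to_cpu()   # Function.to_cpu() brings W/gW back, but not the sampler

print(ns.sampler.use_gpu)          # still True
print(type(ns.sampler.threshold))  # still a pycuda GPUArray, not numpy.ndarray
```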
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/negative_sampling.py`
Content:
```
1 import numpy
2 import six
3
4 from chainer import cuda
5 from chainer import function
6 from chainer.utils import type_check
7 from chainer.utils import walker_alias
8
9
10 class NegativeSampling(function.Function):
11 """Implementation of negative sampling.
12
13 In natural language processing, especially language modeling, the number of
14 vocabulary is very large.
15 Therefore, you need to spend a lot of time to calculate the gradient of the
16 embedding matrix.
17
18 Instead, in negative sampling trick, you only need to calculate the
19 gradient for a few sampled negative examples.
20
21 The objective function is below:
22
23 .. math::
24
25 f(x, p) = \log\sigma(x^\\top w_p) + \\
26 k E_{i \sim P(i)}[\log\sigma(- x^\\top w_i)],
27
28 where :math:`\sigma(\cdot)` is a sigmoid function, :math:`w_i` is the
29 weight vector for the word :math:`i`, and :math:`p` is a positive example.
30 It is approximeted with :math:`k` examples :math:`N` sampled from
31 probability :math:`P(i)`, like this:
32
33 .. math::
34
35 f(x, p) \\approx \log\sigma(x^\\top w_p) + \\
36 \sum_{n \in N} \log\sigma(-x^\\top w_n).
37
38 Each sample of :math:`N` is drawn from the word distribution :math:`P(w)`.
39 This is calculated as :math:`P(w) = \\frac{1}{Z} c(w)^\\alpha`, where
40 :math:`c(w)` is the unigram count of the word :math:`w`, :math:`\\alpha` is
41 a hyper-parameter, and :math:`Z` is the normalization constant.
42
43 Args:
44 in_size (int): Dimension of input vectors.
45 counts (int list): Number of each identifiers.
46 sample_size (int): Number of negative samples.
47 power (float): Power factor :math:`\\alpha`.
48
49 See: `Distributed Representations of Words and Phrases and their\
50 Compositionality <http://arxiv.org/abs/1310.4546>`_
51 """
52
53 parameter_names = ('W',)
54 gradient_names = ('gW',)
55
56 def __init__(self, in_size, counts, sample_size, power=0.75):
57 self.sample_size = sample_size
58 p = numpy.array(counts, numpy.float32)
59 p = numpy.power(p, power)
60 self.sampler = walker_alias.WalkerAlias(p)
61
62 vocab_size = len(counts)
63 self.W = numpy.zeros((vocab_size, in_size)).astype(numpy.float32)
64 self.gW = numpy.zeros_like(self.W)
65
66 def _make_samples(self, t):
67 if hasattr(self, 'samples'):
68 return self.samples
69
70 size = int(t.shape[0])
71 # first one is the positive, and others are sampled negatives
72 samples = self.sampler.sample((size, self.sample_size + 1))
73 if isinstance(samples, numpy.ndarray):
74 samples.T[0] = t
75 else:
76 cuda.elementwise(
77 'const int* t, int* s, int m',
78 ''' s[i * m] = t[i]; ''',
79 'negative_sampling_assign'
80 )(t, samples, self.sample_size + 1)
81
82 self.samples = samples
83
84 def check_type_forward(self, in_types):
85 type_check.expect(in_types.size() == 2)
86 x_type, t_type = in_types
87
88 type_check.expect(
89 x_type.dtype == numpy.float32,
90 x_type.ndim == 2,
91 t_type.dtype == numpy.int32,
92 t_type.ndim == 1,
93 x_type.shape[0] == t_type.shape[0]
94 )
95
96 def check_type_backward(self, in_types, out_types):
97 type_check.expect(
98 out_types.size() == 1,
99 out_types[0].dtype == numpy.float32,
100 out_types[0].ndim == 0
101 )
102
103 def to_gpu(self, device=None):
104 function.Function.to_gpu(self, device)
105 self.sampler.to_gpu()
106
107 def forward_cpu(self, inputs):
108 x, t = inputs
109 self._make_samples(t)
110
111 loss = numpy.float32(0.0)
112 for i, (ix, k) in enumerate(six.moves.zip(x, self.samples)):
113 w = self.W[k]
114 f = w.dot(ix)
115 f[0] *= -1 # positive sample
116 loss += numpy.sum(numpy.logaddexp(f, 0))
117 return numpy.array(loss, numpy.float32),
118
119 def forward_gpu(self, inputs):
120 x, t = inputs
121 n_in = x.shape[1]
122 self._make_samples(t)
123
124 wx = cuda.empty((x.shape[0], self.sample_size + 1))
125 cuda.elementwise(
126 '''float* wx, const float* W, const float* x, const int* k, int c,
127 int m''',
128 '''
129 x = &x[(i / m) * c];
130 W = &W[k[i] * c];
131 float f = 0;
132 for (int j = 0; j < c; ++j) {
133 f += x[j] * W[j];
134 }
135 wx[i] = f;
136 ''',
137 'negative_sampling_wx'
138 )(wx, self.W, x, self.samples, n_in, self.sample_size + 1)
139 self.wx = wx
140
141 y = cuda.zeros_like(wx)
142 cuda.elementwise(
143 'float* y, const float* wx, int c, int m',
144 '''
145 float f = wx[i];
146 if (i % m == 0) {
147 f = -f;
148 }
149 float loss;
150 if (f < 0) {
151 loss = __logf(1 + __expf(f));
152 } else {
153 loss = f + __logf(1 + __expf(-f));
154 }
155 y[i] = loss;
156 ''',
157 'negative_sampling_forward'
158 )(y, wx, n_in, self.sample_size + 1)
159 loss = cuda.gpuarray.sum(y)
160 return loss,
161
162 def backward_cpu(self, inputs, grads):
163 x, t = inputs
164 gloss, = grads
165
166 gx = numpy.zeros_like(x)
167
168 for i, (ix, k) in enumerate(six.moves.zip(x, self.samples)):
169 w = self.W[k]
170 f = w.dot(ix)
171
172 # g == -y * gloss / (1 + exp(yf))
173 f[0] *= -1
174 g = gloss / (1 + numpy.exp(-f))
175 g[0] *= -1
176
177 gx[i] = g.dot(w)
178 for ik, ig in six.moves.zip(k, g):
179 self.gW[ik] += ig * ix
180 return gx, None
181
182 def backward_gpu(self, inputs, grads):
183 x, t = inputs
184 gloss, = grads
185
186 n_in = x.shape[1]
187 g = cuda.empty_like(self.wx)
188 cuda.elementwise(
189 'float* g, const float* wx, const float* gloss, int m',
190 '''
191 float y;
192 if (i % m == 0) {
193 y = 1;
194 } else {
195 y = -1;
196 }
197
198 g[i] = -y * *gloss / (1.0f + __expf(wx[i] * y));
199 ''',
200 'negative_sampling_calculate_g'
201 )(g, self.wx, gloss, self.sample_size + 1)
202 gx = cuda.zeros_like(x)
203 cuda.elementwise(
204 '''float* gx, const float* g, const float* W, const int* k, int c,
205 int m''',
206 '''
207 int d = i / c;
208 g = &g[d * m];
209 k = &k[d * m];
210 float w = 0;
211 for (int j = 0; j < m; ++j) {
212 w += g[j] * W[k[j] * c + i % c];
213 }
214 gx[i] = w;
215 ''',
216 'negative_sampling_calculate_gx'
217 )(gx, g, self.W, self.samples, n_in, self.sample_size + 1)
218 cuda.elementwise(
219 '''const float * g, const float* x, const int* k, float* gW, int c,
220 int m''',
221 '''
222 x = &x[(i / m) * c];
223 gW = &gW[k[i] * c];
224 float gi = g[i];
225 for (int j = 0; j < c; ++j) {
226 atomicAdd(gW + j, gi * x[j]);
227 }
228 ''',
229 'negative_sampling_calculate_gw'
230 )(g, x, self.samples, self.gW, n_in, self.sample_size + 1)
231 return gx, None
232
```
Path: `chainer/utils/walker_alias.py`
Content:
```
1 import numpy
2
3 from chainer import cuda
4
5
6 class WalkerAlias(object):
7 """Implementation of Walker's alias method.
8
9 This method generates a random sample from given probabilities
10 :math:`p_1, \dots, p_n` in :math:`O(1)` time.
11 It is more efficient than :func:`~numpy.random.choice`.
12 This class has sampling methods in CPU and in GPU.
13
14 Args:
15 probs (float list): Probabilities of entries. They are normalized with
16 `sum(probs)`.
17
18 See: `Wikipedia article <https://en.wikipedia.org/wiki/Alias_method>`_
19
20 """
21
22 def __init__(self, probs):
23 prob = numpy.array(probs, numpy.float32)
24 prob /= numpy.sum(prob)
25 threshold = numpy.ndarray(len(probs), numpy.float32)
26 values = numpy.ndarray(len(probs) * 2, numpy.int32)
27 il, ir = 0, 0
28 pairs = list(zip(prob, range(len(probs))))
29 pairs.sort()
30 for prob, i in pairs:
31 p = prob * len(probs)
32 while p > 1 and ir < len(threshold):
33 values[ir * 2 + 1] = i
34 p -= 1.0 - threshold[ir]
35 ir += 1
36 threshold[il] = p
37 values[il * 2] = i
38 il += 1
39 # fill the rest
40 for i in range(ir, len(probs)):
41 values[i * 2 + 1] = 0
42
43 assert((values < len(threshold)).all())
44 self.threshold = threshold
45 self.values = values
46 self.use_gpu = False
47
48 def to_gpu(self):
49 """Make a sampler GPU mode.
50
51 """
52 if not self.use_gpu:
53 self.threshold = cuda.to_gpu(self.threshold)
54 self.values = cuda.to_gpu(self.values)
55 self.use_gpu = True
56
57 def sample(self, shape):
58 """Generates a random sample based on given probabilities.
59
60 Args:
61 shape (tuple of int): Shape of a return value.
62
63 Returns:
64 Returns a generated array with the given shape. If a sampler is in
65 CPU mode the return value is :class:`~numpy.ndarray`, and if it is
66 in GPU mode the return value is :class:`~pycuda.gpuarray.GPUArray`.
67 """
68 if self.use_gpu:
69 return self.sample_gpu(shape)
70 else:
71 return self.sample_cpu(shape)
72
73 def sample_cpu(self, shape):
74 ps = numpy.random.uniform(0, 1, shape)
75 pb = ps * len(self.threshold)
76 index = pb.astype(numpy.int32)
77 left_right = (self.threshold[index] < pb - index).astype(numpy.int32)
78 return self.values[index * 2 + left_right]
79
80 def sample_gpu(self, shape):
81 ps = cuda.empty(shape, numpy.float32)
82 cuda.get_generator().fill_uniform(ps)
83 vs = cuda.empty(shape, numpy.int32)
84 cuda.elementwise(
85 '''int* vs, const float* ps, const float* threshold,
86 const int* values, int b''',
87 '''
88 float pb = ps[i] * b;
89 int index = __float2int_rd(pb);
90 // fill_uniform sometimes returns 1.0, so we need to check index
91 if (index >= b) {
92 index = 0;
93 }
94 int lr = threshold[index] < pb - index;
95 vs[i] = values[index * 2 + lr];
96 ''',
97 'walker_alias_sample'
98 )(vs, ps, self.threshold, self.values, len(self.threshold))
99 return vs
100
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/negative_sampling.py b/chainer/functions/negative_sampling.py
--- a/chainer/functions/negative_sampling.py
+++ b/chainer/functions/negative_sampling.py
@@ -104,6 +104,10 @@
function.Function.to_gpu(self, device)
self.sampler.to_gpu()
+ def to_cpu(self):
+ function.Function.to_cpu(self)
+ self.sampler.to_cpu()
+
def forward_cpu(self, inputs):
x, t = inputs
self._make_samples(t)
diff --git a/chainer/utils/walker_alias.py b/chainer/utils/walker_alias.py
--- a/chainer/utils/walker_alias.py
+++ b/chainer/utils/walker_alias.py
@@ -54,6 +54,15 @@
self.values = cuda.to_gpu(self.values)
self.use_gpu = True
+ def to_cpu(self):
+ """Make a sampler CPU mode.
+
+ """
+ if self.use_gpu:
+ self.threshold = cuda.to_cpu(self.threshold)
+ self.values = cuda.to_cpu(self.values)
+ self.use_gpu = False
+
def sample(self, shape):
"""Generates a random sample based on given probabilities.
| {"golden_diff": "diff --git a/chainer/functions/negative_sampling.py b/chainer/functions/negative_sampling.py\n--- a/chainer/functions/negative_sampling.py\n+++ b/chainer/functions/negative_sampling.py\n@@ -104,6 +104,10 @@\n function.Function.to_gpu(self, device)\n self.sampler.to_gpu()\n \n+ def to_cpu(self):\n+ function.Function.to_cpu(self)\n+ self.sampler.to_cpu()\n+\n def forward_cpu(self, inputs):\n x, t = inputs\n self._make_samples(t)\ndiff --git a/chainer/utils/walker_alias.py b/chainer/utils/walker_alias.py\n--- a/chainer/utils/walker_alias.py\n+++ b/chainer/utils/walker_alias.py\n@@ -54,6 +54,15 @@\n self.values = cuda.to_gpu(self.values)\n self.use_gpu = True\n \n+ def to_cpu(self):\n+ \"\"\"Make a sampler CPU mode.\n+\n+ \"\"\"\n+ if self.use_gpu:\n+ self.threshold = cuda.to_cpu(self.threshold)\n+ self.values = cuda.to_cpu(self.values)\n+ self.use_gpu = False\n+\n def sample(self, shape):\n \"\"\"Generates a random sample based on given probabilities.\n", "issue": "NegativeSampling and WalkerAlias do not implement to_cpu()\n`NegativeSampling` implements only `to_gpu()`. Therefore, `self.sampler` does not come back to the CPU side even if `to_cpu()` is called. `WalkerAlias` also does not have `to_cpu()`.\n\nCurrently this causes a problem with [word2vec example](https://github.com/pfnet/chainer/pull/270) when loading a GPU-trained model based on negative sampling, which [has been transferred](https://github.com/pfnet/chainer/blob/0bab97c584d462e4478f360c26ce427bfe81eb0f/examples/word2vec/train_word2vec.py#L183) by `model.to_cpu()` before save by pickle.\n\n", "before_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\nfrom chainer.utils import walker_alias\n\n\nclass NegativeSampling(function.Function):\n \"\"\"Implementation of negative sampling.\n\n In natural language processing, especially language modeling, the number of\n vocabulary is very large.\n Therefore, you need to spend a lot of time to calculate the gradient of the\n embedding matrix.\n\n Instead, in negative sampling trick, you only need to calculate the\n gradient for a few sampled negative examples.\n\n The objective function is below:\n\n .. math::\n\n f(x, p) = \\log\\sigma(x^\\\\top w_p) + \\\\\n k E_{i \\sim P(i)}[\\log\\sigma(- x^\\\\top w_i)],\n\n where :math:`\\sigma(\\cdot)` is a sigmoid function, :math:`w_i` is the\n weight vector for the word :math:`i`, and :math:`p` is a positive example.\n It is approximeted with :math:`k` examples :math:`N` sampled from\n probability :math:`P(i)`, like this:\n\n .. 
math::\n\n f(x, p) \\\\approx \\log\\sigma(x^\\\\top w_p) + \\\\\n \\sum_{n \\in N} \\log\\sigma(-x^\\\\top w_n).\n\n Each sample of :math:`N` is drawn from the word distribution :math:`P(w)`.\n This is calculated as :math:`P(w) = \\\\frac{1}{Z} c(w)^\\\\alpha`, where\n :math:`c(w)` is the unigram count of the word :math:`w`, :math:`\\\\alpha` is\n a hyper-parameter, and :math:`Z` is the normalization constant.\n\n Args:\n in_size (int): Dimension of input vectors.\n counts (int list): Number of each identifiers.\n sample_size (int): Number of negative samples.\n power (float): Power factor :math:`\\\\alpha`.\n\n See: `Distributed Representations of Words and Phrases and their\\\n Compositionality <http://arxiv.org/abs/1310.4546>`_\n \"\"\"\n\n parameter_names = ('W',)\n gradient_names = ('gW',)\n\n def __init__(self, in_size, counts, sample_size, power=0.75):\n self.sample_size = sample_size\n p = numpy.array(counts, numpy.float32)\n p = numpy.power(p, power)\n self.sampler = walker_alias.WalkerAlias(p)\n\n vocab_size = len(counts)\n self.W = numpy.zeros((vocab_size, in_size)).astype(numpy.float32)\n self.gW = numpy.zeros_like(self.W)\n\n def _make_samples(self, t):\n if hasattr(self, 'samples'):\n return self.samples\n\n size = int(t.shape[0])\n # first one is the positive, and others are sampled negatives\n samples = self.sampler.sample((size, self.sample_size + 1))\n if isinstance(samples, numpy.ndarray):\n samples.T[0] = t\n else:\n cuda.elementwise(\n 'const int* t, int* s, int m',\n ''' s[i * m] = t[i]; ''',\n 'negative_sampling_assign'\n )(t, samples, self.sample_size + 1)\n\n self.samples = samples\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n x_type, t_type = in_types\n\n type_check.expect(\n x_type.dtype == numpy.float32,\n x_type.ndim == 2,\n t_type.dtype == numpy.int32,\n t_type.ndim == 1,\n x_type.shape[0] == t_type.shape[0]\n )\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n out_types.size() == 1,\n out_types[0].dtype == numpy.float32,\n out_types[0].ndim == 0\n )\n\n def to_gpu(self, device=None):\n function.Function.to_gpu(self, device)\n self.sampler.to_gpu()\n\n def forward_cpu(self, inputs):\n x, t = inputs\n self._make_samples(t)\n\n loss = numpy.float32(0.0)\n for i, (ix, k) in enumerate(six.moves.zip(x, self.samples)):\n w = self.W[k]\n f = w.dot(ix)\n f[0] *= -1 # positive sample\n loss += numpy.sum(numpy.logaddexp(f, 0))\n return numpy.array(loss, numpy.float32),\n\n def forward_gpu(self, inputs):\n x, t = inputs\n n_in = x.shape[1]\n self._make_samples(t)\n\n wx = cuda.empty((x.shape[0], self.sample_size + 1))\n cuda.elementwise(\n '''float* wx, const float* W, const float* x, const int* k, int c,\n int m''',\n '''\n x = &x[(i / m) * c];\n W = &W[k[i] * c];\n float f = 0;\n for (int j = 0; j < c; ++j) {\n f += x[j] * W[j];\n }\n wx[i] = f;\n ''',\n 'negative_sampling_wx'\n )(wx, self.W, x, self.samples, n_in, self.sample_size + 1)\n self.wx = wx\n\n y = cuda.zeros_like(wx)\n cuda.elementwise(\n 'float* y, const float* wx, int c, int m',\n '''\n float f = wx[i];\n if (i % m == 0) {\n f = -f;\n }\n float loss;\n if (f < 0) {\n loss = __logf(1 + __expf(f));\n } else {\n loss = f + __logf(1 + __expf(-f));\n }\n y[i] = loss;\n ''',\n 'negative_sampling_forward'\n )(y, wx, n_in, self.sample_size + 1)\n loss = cuda.gpuarray.sum(y)\n return loss,\n\n def backward_cpu(self, inputs, grads):\n x, t = inputs\n gloss, = grads\n\n gx = numpy.zeros_like(x)\n\n for i, (ix, k) in enumerate(six.moves.zip(x, 
self.samples)):\n w = self.W[k]\n f = w.dot(ix)\n\n # g == -y * gloss / (1 + exp(yf))\n f[0] *= -1\n g = gloss / (1 + numpy.exp(-f))\n g[0] *= -1\n\n gx[i] = g.dot(w)\n for ik, ig in six.moves.zip(k, g):\n self.gW[ik] += ig * ix\n return gx, None\n\n def backward_gpu(self, inputs, grads):\n x, t = inputs\n gloss, = grads\n\n n_in = x.shape[1]\n g = cuda.empty_like(self.wx)\n cuda.elementwise(\n 'float* g, const float* wx, const float* gloss, int m',\n '''\n float y;\n if (i % m == 0) {\n y = 1;\n } else {\n y = -1;\n }\n\n g[i] = -y * *gloss / (1.0f + __expf(wx[i] * y));\n ''',\n 'negative_sampling_calculate_g'\n )(g, self.wx, gloss, self.sample_size + 1)\n gx = cuda.zeros_like(x)\n cuda.elementwise(\n '''float* gx, const float* g, const float* W, const int* k, int c,\n int m''',\n '''\n int d = i / c;\n g = &g[d * m];\n k = &k[d * m];\n float w = 0;\n for (int j = 0; j < m; ++j) {\n w += g[j] * W[k[j] * c + i % c];\n }\n gx[i] = w;\n ''',\n 'negative_sampling_calculate_gx'\n )(gx, g, self.W, self.samples, n_in, self.sample_size + 1)\n cuda.elementwise(\n '''const float * g, const float* x, const int* k, float* gW, int c,\n int m''',\n '''\n x = &x[(i / m) * c];\n gW = &gW[k[i] * c];\n float gi = g[i];\n for (int j = 0; j < c; ++j) {\n atomicAdd(gW + j, gi * x[j]);\n }\n ''',\n 'negative_sampling_calculate_gw'\n )(g, x, self.samples, self.gW, n_in, self.sample_size + 1)\n return gx, None\n", "path": "chainer/functions/negative_sampling.py"}, {"content": "import numpy\n\nfrom chainer import cuda\n\n\nclass WalkerAlias(object):\n \"\"\"Implementation of Walker's alias method.\n\n This method generates a random sample from given probabilities\n :math:`p_1, \\dots, p_n` in :math:`O(1)` time.\n It is more efficient than :func:`~numpy.random.choice`.\n This class has sampling methods in CPU and in GPU.\n\n Args:\n probs (float list): Probabilities of entries. They are normalized with\n `sum(probs)`.\n\n See: `Wikipedia article <https://en.wikipedia.org/wiki/Alias_method>`_\n\n \"\"\"\n\n def __init__(self, probs):\n prob = numpy.array(probs, numpy.float32)\n prob /= numpy.sum(prob)\n threshold = numpy.ndarray(len(probs), numpy.float32)\n values = numpy.ndarray(len(probs) * 2, numpy.int32)\n il, ir = 0, 0\n pairs = list(zip(prob, range(len(probs))))\n pairs.sort()\n for prob, i in pairs:\n p = prob * len(probs)\n while p > 1 and ir < len(threshold):\n values[ir * 2 + 1] = i\n p -= 1.0 - threshold[ir]\n ir += 1\n threshold[il] = p\n values[il * 2] = i\n il += 1\n # fill the rest\n for i in range(ir, len(probs)):\n values[i * 2 + 1] = 0\n\n assert((values < len(threshold)).all())\n self.threshold = threshold\n self.values = values\n self.use_gpu = False\n\n def to_gpu(self):\n \"\"\"Make a sampler GPU mode.\n\n \"\"\"\n if not self.use_gpu:\n self.threshold = cuda.to_gpu(self.threshold)\n self.values = cuda.to_gpu(self.values)\n self.use_gpu = True\n\n def sample(self, shape):\n \"\"\"Generates a random sample based on given probabilities.\n\n Args:\n shape (tuple of int): Shape of a return value.\n\n Returns:\n Returns a generated array with the given shape. 
If a sampler is in\n CPU mode the return value is :class:`~numpy.ndarray`, and if it is\n in GPU mode the return value is :class:`~pycuda.gpuarray.GPUArray`.\n \"\"\"\n if self.use_gpu:\n return self.sample_gpu(shape)\n else:\n return self.sample_cpu(shape)\n\n def sample_cpu(self, shape):\n ps = numpy.random.uniform(0, 1, shape)\n pb = ps * len(self.threshold)\n index = pb.astype(numpy.int32)\n left_right = (self.threshold[index] < pb - index).astype(numpy.int32)\n return self.values[index * 2 + left_right]\n\n def sample_gpu(self, shape):\n ps = cuda.empty(shape, numpy.float32)\n cuda.get_generator().fill_uniform(ps)\n vs = cuda.empty(shape, numpy.int32)\n cuda.elementwise(\n '''int* vs, const float* ps, const float* threshold,\n const int* values, int b''',\n '''\n float pb = ps[i] * b;\n int index = __float2int_rd(pb);\n // fill_uniform sometimes returns 1.0, so we need to check index\n if (index >= b) {\n index = 0;\n }\n int lr = threshold[index] < pb - index;\n vs[i] = values[index * 2 + lr];\n ''',\n 'walker_alias_sample'\n )(vs, ps, self.threshold, self.values, len(self.threshold))\n return vs\n", "path": "chainer/utils/walker_alias.py"}], "after_files": [{"content": "import numpy\nimport six\n\nfrom chainer import cuda\nfrom chainer import function\nfrom chainer.utils import type_check\nfrom chainer.utils import walker_alias\n\n\nclass NegativeSampling(function.Function):\n \"\"\"Implementation of negative sampling.\n\n In natural language processing, especially language modeling, the number of\n vocabulary is very large.\n Therefore, you need to spend a lot of time to calculate the gradient of the\n embedding matrix.\n\n Instead, in negative sampling trick, you only need to calculate the\n gradient for a few sampled negative examples.\n\n The objective function is below:\n\n .. math::\n\n f(x, p) = \\log\\sigma(x^\\\\top w_p) + \\\\\n k E_{i \\sim P(i)}[\\log\\sigma(- x^\\\\top w_i)],\n\n where :math:`\\sigma(\\cdot)` is a sigmoid function, :math:`w_i` is the\n weight vector for the word :math:`i`, and :math:`p` is a positive example.\n It is approximeted with :math:`k` examples :math:`N` sampled from\n probability :math:`P(i)`, like this:\n\n .. 
math::\n\n f(x, p) \\\\approx \\log\\sigma(x^\\\\top w_p) + \\\\\n \\sum_{n \\in N} \\log\\sigma(-x^\\\\top w_n).\n\n Each sample of :math:`N` is drawn from the word distribution :math:`P(w)`.\n This is calculated as :math:`P(w) = \\\\frac{1}{Z} c(w)^\\\\alpha`, where\n :math:`c(w)` is the unigram count of the word :math:`w`, :math:`\\\\alpha` is\n a hyper-parameter, and :math:`Z` is the normalization constant.\n\n Args:\n in_size (int): Dimension of input vectors.\n counts (int list): Number of each identifiers.\n sample_size (int): Number of negative samples.\n power (float): Power factor :math:`\\\\alpha`.\n\n See: `Distributed Representations of Words and Phrases and their\\\n Compositionality <http://arxiv.org/abs/1310.4546>`_\n \"\"\"\n\n parameter_names = ('W',)\n gradient_names = ('gW',)\n\n def __init__(self, in_size, counts, sample_size, power=0.75):\n self.sample_size = sample_size\n p = numpy.array(counts, numpy.float32)\n p = numpy.power(p, power)\n self.sampler = walker_alias.WalkerAlias(p)\n\n vocab_size = len(counts)\n self.W = numpy.zeros((vocab_size, in_size)).astype(numpy.float32)\n self.gW = numpy.zeros_like(self.W)\n\n def _make_samples(self, t):\n if hasattr(self, 'samples'):\n return self.samples\n\n size = int(t.shape[0])\n # first one is the positive, and others are sampled negatives\n samples = self.sampler.sample((size, self.sample_size + 1))\n if isinstance(samples, numpy.ndarray):\n samples.T[0] = t\n else:\n cuda.elementwise(\n 'const int* t, int* s, int m',\n ''' s[i * m] = t[i]; ''',\n 'negative_sampling_assign'\n )(t, samples, self.sample_size + 1)\n\n self.samples = samples\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n x_type, t_type = in_types\n\n type_check.expect(\n x_type.dtype == numpy.float32,\n x_type.ndim == 2,\n t_type.dtype == numpy.int32,\n t_type.ndim == 1,\n x_type.shape[0] == t_type.shape[0]\n )\n\n def check_type_backward(self, in_types, out_types):\n type_check.expect(\n out_types.size() == 1,\n out_types[0].dtype == numpy.float32,\n out_types[0].ndim == 0\n )\n\n def to_gpu(self, device=None):\n function.Function.to_gpu(self, device)\n self.sampler.to_gpu()\n\n def to_cpu(self):\n function.Function.to_cpu(self)\n self.sampler.to_cpu()\n\n def forward_cpu(self, inputs):\n x, t = inputs\n self._make_samples(t)\n\n loss = numpy.float32(0.0)\n for i, (ix, k) in enumerate(six.moves.zip(x, self.samples)):\n w = self.W[k]\n f = w.dot(ix)\n f[0] *= -1 # positive sample\n loss += numpy.sum(numpy.logaddexp(f, 0))\n return numpy.array(loss, numpy.float32),\n\n def forward_gpu(self, inputs):\n x, t = inputs\n n_in = x.shape[1]\n self._make_samples(t)\n\n wx = cuda.empty((x.shape[0], self.sample_size + 1))\n cuda.elementwise(\n '''float* wx, const float* W, const float* x, const int* k, int c,\n int m''',\n '''\n x = &x[(i / m) * c];\n W = &W[k[i] * c];\n float f = 0;\n for (int j = 0; j < c; ++j) {\n f += x[j] * W[j];\n }\n wx[i] = f;\n ''',\n 'negative_sampling_wx'\n )(wx, self.W, x, self.samples, n_in, self.sample_size + 1)\n self.wx = wx\n\n y = cuda.zeros_like(wx)\n cuda.elementwise(\n 'float* y, const float* wx, int c, int m',\n '''\n float f = wx[i];\n if (i % m == 0) {\n f = -f;\n }\n float loss;\n if (f < 0) {\n loss = __logf(1 + __expf(f));\n } else {\n loss = f + __logf(1 + __expf(-f));\n }\n y[i] = loss;\n ''',\n 'negative_sampling_forward'\n )(y, wx, n_in, self.sample_size + 1)\n loss = cuda.gpuarray.sum(y)\n return loss,\n\n def backward_cpu(self, inputs, grads):\n x, t = inputs\n gloss, = 
grads\n\n gx = numpy.zeros_like(x)\n\n for i, (ix, k) in enumerate(six.moves.zip(x, self.samples)):\n w = self.W[k]\n f = w.dot(ix)\n\n # g == -y * gloss / (1 + exp(yf))\n f[0] *= -1\n g = gloss / (1 + numpy.exp(-f))\n g[0] *= -1\n\n gx[i] = g.dot(w)\n for ik, ig in six.moves.zip(k, g):\n self.gW[ik] += ig * ix\n return gx, None\n\n def backward_gpu(self, inputs, grads):\n x, t = inputs\n gloss, = grads\n\n n_in = x.shape[1]\n g = cuda.empty_like(self.wx)\n cuda.elementwise(\n 'float* g, const float* wx, const float* gloss, int m',\n '''\n float y;\n if (i % m == 0) {\n y = 1;\n } else {\n y = -1;\n }\n\n g[i] = -y * *gloss / (1.0f + __expf(wx[i] * y));\n ''',\n 'negative_sampling_calculate_g'\n )(g, self.wx, gloss, self.sample_size + 1)\n gx = cuda.zeros_like(x)\n cuda.elementwise(\n '''float* gx, const float* g, const float* W, const int* k, int c,\n int m''',\n '''\n int d = i / c;\n g = &g[d * m];\n k = &k[d * m];\n float w = 0;\n for (int j = 0; j < m; ++j) {\n w += g[j] * W[k[j] * c + i % c];\n }\n gx[i] = w;\n ''',\n 'negative_sampling_calculate_gx'\n )(gx, g, self.W, self.samples, n_in, self.sample_size + 1)\n cuda.elementwise(\n '''const float * g, const float* x, const int* k, float* gW, int c,\n int m''',\n '''\n x = &x[(i / m) * c];\n gW = &gW[k[i] * c];\n float gi = g[i];\n for (int j = 0; j < c; ++j) {\n atomicAdd(gW + j, gi * x[j]);\n }\n ''',\n 'negative_sampling_calculate_gw'\n )(g, x, self.samples, self.gW, n_in, self.sample_size + 1)\n return gx, None\n", "path": "chainer/functions/negative_sampling.py"}, {"content": "import numpy\n\nfrom chainer import cuda\n\n\nclass WalkerAlias(object):\n \"\"\"Implementation of Walker's alias method.\n\n This method generates a random sample from given probabilities\n :math:`p_1, \\dots, p_n` in :math:`O(1)` time.\n It is more efficient than :func:`~numpy.random.choice`.\n This class has sampling methods in CPU and in GPU.\n\n Args:\n probs (float list): Probabilities of entries. They are normalized with\n `sum(probs)`.\n\n See: `Wikipedia article <https://en.wikipedia.org/wiki/Alias_method>`_\n\n \"\"\"\n\n def __init__(self, probs):\n prob = numpy.array(probs, numpy.float32)\n prob /= numpy.sum(prob)\n threshold = numpy.ndarray(len(probs), numpy.float32)\n values = numpy.ndarray(len(probs) * 2, numpy.int32)\n il, ir = 0, 0\n pairs = list(zip(prob, range(len(probs))))\n pairs.sort()\n for prob, i in pairs:\n p = prob * len(probs)\n while p > 1 and ir < len(threshold):\n values[ir * 2 + 1] = i\n p -= 1.0 - threshold[ir]\n ir += 1\n threshold[il] = p\n values[il * 2] = i\n il += 1\n # fill the rest\n for i in range(ir, len(probs)):\n values[i * 2 + 1] = 0\n\n assert((values < len(threshold)).all())\n self.threshold = threshold\n self.values = values\n self.use_gpu = False\n\n def to_gpu(self):\n \"\"\"Make a sampler GPU mode.\n\n \"\"\"\n if not self.use_gpu:\n self.threshold = cuda.to_gpu(self.threshold)\n self.values = cuda.to_gpu(self.values)\n self.use_gpu = True\n\n def to_cpu(self):\n \"\"\"Make a sampler CPU mode.\n\n \"\"\"\n if self.use_gpu:\n self.threshold = cuda.to_cpu(self.threshold)\n self.values = cuda.to_cpu(self.values)\n self.use_gpu = False\n\n def sample(self, shape):\n \"\"\"Generates a random sample based on given probabilities.\n\n Args:\n shape (tuple of int): Shape of a return value.\n\n Returns:\n Returns a generated array with the given shape. 
If a sampler is in\n CPU mode the return value is :class:`~numpy.ndarray`, and if it is\n in GPU mode the return value is :class:`~pycuda.gpuarray.GPUArray`.\n \"\"\"\n if self.use_gpu:\n return self.sample_gpu(shape)\n else:\n return self.sample_cpu(shape)\n\n def sample_cpu(self, shape):\n ps = numpy.random.uniform(0, 1, shape)\n pb = ps * len(self.threshold)\n index = pb.astype(numpy.int32)\n left_right = (self.threshold[index] < pb - index).astype(numpy.int32)\n return self.values[index * 2 + left_right]\n\n def sample_gpu(self, shape):\n ps = cuda.empty(shape, numpy.float32)\n cuda.get_generator().fill_uniform(ps)\n vs = cuda.empty(shape, numpy.int32)\n cuda.elementwise(\n '''int* vs, const float* ps, const float* threshold,\n const int* values, int b''',\n '''\n float pb = ps[i] * b;\n int index = __float2int_rd(pb);\n // fill_uniform sometimes returns 1.0, so we need to check index\n if (index >= b) {\n index = 0;\n }\n int lr = threshold[index] < pb - index;\n vs[i] = values[index * 2 + lr];\n ''',\n 'walker_alias_sample'\n )(vs, ps, self.threshold, self.values, len(self.threshold))\n return vs\n", "path": "chainer/utils/walker_alias.py"}]} | 4,028 | 265 |
gh_patches_debug_45351 | rasdani/github-patches | git_diff | quantumlib__Cirq-5261 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs: Filter out TYPE_CHECKING from public docs
**Description of the issue**
The `TYPE_CHECKING` variable imported from `typing` shows up in API docs (example: https://github.com/quantumlib/Cirq/issues/5150). We should filter it out, since it's not part of the cirq API. Per @dabacon's [comment](https://github.com/quantumlib/Cirq/pull/5229#issuecomment-1093080151), we should be able to do this in `dev_tools/docs/build_api_docs.py`.
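
One possible shape for such a filter, modelled on the `filter_unwanted_inherited_methods` callback already in `build_api_docs.py` (the name and exact wiring here are only a sketch):

```python
def filter_type_checking(path, parent, children):
    # Drop the `typing.TYPE_CHECKING` constant from every public API listing.
    return [(name, obj) for name, obj in children if name != 'TYPE_CHECKING']
```

It would then be appended to the `callbacks=[...]` list passed to each `generate_lib.DocGenerator` in that script.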
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dev_tools/docs/build_api_docs.py`
Content:
```
1 # Copyright 2021 The Cirq Developers
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Tool to generate external api_docs for Cirq.
16
17 In order to publish to our site, devsite runs two jobs for us: stable and nightly.
18 The stable one downloads the latest cirq release from pypi and uses that to generate the reference
19 API docs.
20 The nightly one downloads the latest cirq pre-release (pip install cirq --pre) and uses that to
21 generate the "nightly diff".
22
23 This script needs to cater for both of these cases.
24 """
25
26 import os
27 import types
28
29 import networkx
30 from absl import app
31 from absl import flags
32 from tensorflow_docs.api_generator import doc_controls
33 from tensorflow_docs.api_generator import generate_lib
34 from tensorflow_docs.api_generator import public_api
35
36 import cirq
37 import cirq_aqt
38 import cirq_google
39 import cirq_ionq
40 import cirq_pasqal
41 import cirq_rigetti
42 import cirq_web
43
44 from cirq import _doc
45
46 flags.DEFINE_string("output_dir", "docs/api_docs", "Where to output the docs")
47
48 flags.DEFINE_string(
49 "code_url_prefix",
50 "https://github.com/quantumlib/Cirq/blob/master",
51 "The url prefix for links to code.",
52 )
53
54 flags.DEFINE_bool("search_hints", True, "Include metadata search hints in the generated files")
55
56 flags.DEFINE_string("site_path", "reference/python", "Path prefix in the _toc.yaml")
57
58 FLAGS = flags.FLAGS
59
60
61 def filter_unwanted_inherited_methods(path, parent, children):
62 """Filter the unwanted inherited methods.
63
64 CircuitDag inherits a lot of methods from `networkx.DiGraph` and `Graph`.
65 This filter removes these, as it creates a lot of noise in the API docs.
66 """
67 if parent.__name__ != "CircuitDag":
68 return children
69
70 filtered_children = []
71 for name, obj in children:
72 if isinstance(obj, types.FunctionType):
73 if obj.__module__.startswith('cirq'):
74 filtered_children.append((name, obj))
75 return filtered_children
76
77
78 def main(unused_argv):
79 generate_cirq()
80 generate_cirq_google()
81 generate_cirq_aqt()
82 generate_cirq_ionq()
83 generate_cirq_pasqal()
84 generate_cirq_rigetti()
85 generate_cirq_web()
86
87
88 def generate_cirq():
89 doc_generator = generate_lib.DocGenerator(
90 root_title="Cirq",
91 py_modules=[("cirq", cirq)],
92 base_dir=os.path.dirname(cirq.__file__),
93 code_url_prefix=FLAGS.code_url_prefix + "/cirq-core/cirq",
94 search_hints=FLAGS.search_hints,
95 site_path=FLAGS.site_path,
96 callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
97 extra_docs=_doc.RECORDED_CONST_DOCS,
98 )
99 doc_controls.decorate_all_class_attributes(
100 doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]
101 )
102 doc_generator.build(output_dir=FLAGS.output_dir)
103
104
105 def generate_cirq_aqt():
106 doc_generator = generate_lib.DocGenerator(
107 root_title="Cirq-aqt",
108 py_modules=[("cirq_aqt", cirq_aqt)],
109 base_dir=os.path.dirname(cirq_aqt.__file__),
110 code_url_prefix=FLAGS.code_url_prefix + "/cirq-aqt/cirq_aqt",
111 search_hints=FLAGS.search_hints,
112 site_path=FLAGS.site_path,
113 callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
114 extra_docs=_doc.RECORDED_CONST_DOCS,
115 )
116 doc_controls.decorate_all_class_attributes(
117 doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]
118 )
119
120 doc_generator.build(output_dir=FLAGS.output_dir)
121
122
123 def generate_cirq_ionq():
124 doc_generator = generate_lib.DocGenerator(
125 root_title="Cirq_ionq",
126 py_modules=[("cirq_ionq", cirq_ionq)],
127 base_dir=os.path.dirname(cirq_ionq.__file__),
128 code_url_prefix=FLAGS.code_url_prefix + "/cirq-ionq/cirq_ionq",
129 search_hints=FLAGS.search_hints,
130 site_path=FLAGS.site_path,
131 callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
132 extra_docs=_doc.RECORDED_CONST_DOCS,
133 )
134 doc_controls.decorate_all_class_attributes(
135 doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]
136 )
137
138 doc_generator.build(output_dir=FLAGS.output_dir)
139
140
141 def generate_cirq_pasqal():
142 doc_generator = generate_lib.DocGenerator(
143 root_title="Cirq-pasqal",
144 py_modules=[("cirq_pasqal", cirq_pasqal)],
145 base_dir=os.path.dirname(cirq_pasqal.__file__),
146 code_url_prefix=FLAGS.code_url_prefix + "/cirq-pasqal/cirq_pasqal",
147 search_hints=FLAGS.search_hints,
148 site_path=FLAGS.site_path,
149 callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
150 extra_docs=_doc.RECORDED_CONST_DOCS,
151 )
152 doc_controls.decorate_all_class_attributes(
153 doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]
154 )
155
156 doc_generator.build(output_dir=FLAGS.output_dir)
157
158
159 def generate_cirq_rigetti():
160 doc_generator = generate_lib.DocGenerator(
161 root_title="Cirq_rigetti",
162 py_modules=[("cirq_rigetti", cirq_rigetti)],
163 base_dir=os.path.dirname(cirq_rigetti.__file__),
164 code_url_prefix=FLAGS.code_url_prefix + "/cirq-rigetti/cirq_rigetti",
165 search_hints=FLAGS.search_hints,
166 site_path=FLAGS.site_path,
167 callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
168 extra_docs=_doc.RECORDED_CONST_DOCS,
169 )
170 doc_controls.decorate_all_class_attributes(
171 doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]
172 )
173
174 doc_generator.build(output_dir=FLAGS.output_dir)
175
176
177 def generate_cirq_google():
178 doc_generator = generate_lib.DocGenerator(
179 root_title="Cirq-google",
180 py_modules=[("cirq_google", cirq_google)],
181 base_dir=os.path.dirname(cirq_google.__file__),
182 code_url_prefix=FLAGS.code_url_prefix + "/cirq-google/cirq_google",
183 search_hints=FLAGS.search_hints,
184 site_path=FLAGS.site_path,
185 callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
186 private_map={
187 # Opt to not build docs for these paths for now since they error.
188 "cirq_google.cloud.quantum.QuantumEngineServiceClient": ["enums"],
189 "cirq_google.cloud.quantum_v1alpha1.QuantumEngineServiceClient": ["enums"],
190 "cirq_google.api": ["v1"],
191 },
192 extra_docs=_doc.RECORDED_CONST_DOCS,
193 )
194 doc_generator.build(output_dir=FLAGS.output_dir)
195
196
197 def generate_cirq_web():
198 doc_generator = generate_lib.DocGenerator(
199 root_title="Cirq_web",
200 py_modules=[("cirq_web", cirq_web)],
201 base_dir=os.path.dirname(cirq_web.__file__),
202 code_url_prefix=FLAGS.code_url_prefix + "/cirq-web/cirq_web",
203 search_hints=FLAGS.search_hints,
204 site_path=FLAGS.site_path,
205 callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
206 extra_docs=_doc.RECORDED_CONST_DOCS,
207 )
208 doc_controls.decorate_all_class_attributes(
209 doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]
210 )
211
212 doc_generator.build(output_dir=FLAGS.output_dir)
213
214
215 if __name__ == "__main__":
216 app.run(main)
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/dev_tools/docs/build_api_docs.py b/dev_tools/docs/build_api_docs.py
--- a/dev_tools/docs/build_api_docs.py
+++ b/dev_tools/docs/build_api_docs.py
@@ -75,6 +75,14 @@
return filtered_children
+def filter_type_checking(path, parent, children):
+ filtered_children = []
+ for name, obj in children:
+ if name != 'TYPE_CHECKING':
+ filtered_children.append((name, obj))
+ return filtered_children
+
+
def main(unused_argv):
generate_cirq()
generate_cirq_google()
@@ -93,7 +101,11 @@
code_url_prefix=FLAGS.code_url_prefix + "/cirq-core/cirq",
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
- callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
+ callbacks=[
+ public_api.local_definitions_filter,
+ filter_unwanted_inherited_methods,
+ filter_type_checking,
+ ],
extra_docs=_doc.RECORDED_CONST_DOCS,
)
doc_controls.decorate_all_class_attributes(
@@ -110,7 +122,11 @@
code_url_prefix=FLAGS.code_url_prefix + "/cirq-aqt/cirq_aqt",
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
- callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
+ callbacks=[
+ public_api.local_definitions_filter,
+ filter_unwanted_inherited_methods,
+ filter_type_checking,
+ ],
extra_docs=_doc.RECORDED_CONST_DOCS,
)
doc_controls.decorate_all_class_attributes(
@@ -128,7 +144,11 @@
code_url_prefix=FLAGS.code_url_prefix + "/cirq-ionq/cirq_ionq",
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
- callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
+ callbacks=[
+ public_api.local_definitions_filter,
+ filter_unwanted_inherited_methods,
+ filter_type_checking,
+ ],
extra_docs=_doc.RECORDED_CONST_DOCS,
)
doc_controls.decorate_all_class_attributes(
@@ -146,7 +166,11 @@
code_url_prefix=FLAGS.code_url_prefix + "/cirq-pasqal/cirq_pasqal",
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
- callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
+ callbacks=[
+ public_api.local_definitions_filter,
+ filter_unwanted_inherited_methods,
+ filter_type_checking,
+ ],
extra_docs=_doc.RECORDED_CONST_DOCS,
)
doc_controls.decorate_all_class_attributes(
@@ -164,7 +188,11 @@
code_url_prefix=FLAGS.code_url_prefix + "/cirq-rigetti/cirq_rigetti",
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
- callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
+ callbacks=[
+ public_api.local_definitions_filter,
+ filter_unwanted_inherited_methods,
+ filter_type_checking,
+ ],
extra_docs=_doc.RECORDED_CONST_DOCS,
)
doc_controls.decorate_all_class_attributes(
@@ -182,7 +210,11 @@
code_url_prefix=FLAGS.code_url_prefix + "/cirq-google/cirq_google",
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
- callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
+ callbacks=[
+ public_api.local_definitions_filter,
+ filter_unwanted_inherited_methods,
+ filter_type_checking,
+ ],
private_map={
# Opt to not build docs for these paths for now since they error.
"cirq_google.cloud.quantum.QuantumEngineServiceClient": ["enums"],
@@ -202,7 +234,11 @@
code_url_prefix=FLAGS.code_url_prefix + "/cirq-web/cirq_web",
search_hints=FLAGS.search_hints,
site_path=FLAGS.site_path,
- callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],
+ callbacks=[
+ public_api.local_definitions_filter,
+ filter_unwanted_inherited_methods,
+ filter_type_checking,
+ ],
extra_docs=_doc.RECORDED_CONST_DOCS,
)
doc_controls.decorate_all_class_attributes(
| {"golden_diff": "diff --git a/dev_tools/docs/build_api_docs.py b/dev_tools/docs/build_api_docs.py\n--- a/dev_tools/docs/build_api_docs.py\n+++ b/dev_tools/docs/build_api_docs.py\n@@ -75,6 +75,14 @@\n return filtered_children\n \n \n+def filter_type_checking(path, parent, children):\n+ filtered_children = []\n+ for name, obj in children:\n+ if name != 'TYPE_CHECKING':\n+ filtered_children.append((name, obj))\n+ return filtered_children\n+\n+\n def main(unused_argv):\n generate_cirq()\n generate_cirq_google()\n@@ -93,7 +101,11 @@\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-core/cirq\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n- callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],\n+ callbacks=[\n+ public_api.local_definitions_filter,\n+ filter_unwanted_inherited_methods,\n+ filter_type_checking,\n+ ],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n@@ -110,7 +122,11 @@\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-aqt/cirq_aqt\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n- callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],\n+ callbacks=[\n+ public_api.local_definitions_filter,\n+ filter_unwanted_inherited_methods,\n+ filter_type_checking,\n+ ],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n@@ -128,7 +144,11 @@\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-ionq/cirq_ionq\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n- callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],\n+ callbacks=[\n+ public_api.local_definitions_filter,\n+ filter_unwanted_inherited_methods,\n+ filter_type_checking,\n+ ],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n@@ -146,7 +166,11 @@\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-pasqal/cirq_pasqal\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n- callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],\n+ callbacks=[\n+ public_api.local_definitions_filter,\n+ filter_unwanted_inherited_methods,\n+ filter_type_checking,\n+ ],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n@@ -164,7 +188,11 @@\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-rigetti/cirq_rigetti\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n- callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],\n+ callbacks=[\n+ public_api.local_definitions_filter,\n+ filter_unwanted_inherited_methods,\n+ filter_type_checking,\n+ ],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n@@ -182,7 +210,11 @@\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-google/cirq_google\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n- callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],\n+ callbacks=[\n+ public_api.local_definitions_filter,\n+ filter_unwanted_inherited_methods,\n+ filter_type_checking,\n+ ],\n private_map={\n # Opt to not build docs for these paths for now since they error.\n \"cirq_google.cloud.quantum.QuantumEngineServiceClient\": [\"enums\"],\n@@ -202,7 +234,11 @@\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-web/cirq_web\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n- callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],\n+ 
callbacks=[\n+ public_api.local_definitions_filter,\n+ filter_unwanted_inherited_methods,\n+ filter_type_checking,\n+ ],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n", "issue": "Docs: Filter out TYPE_CHECKING from public docs\n**Description of the issue**\r\n\r\nThe `TYPE_CHECKING` variable imported from `typing` shows up in API docs (example: https://github.com/quantumlib/Cirq/issues/5150). We should filter it out, since it's not part of the cirq API. Per @dabacon's [comment](https://github.com/quantumlib/Cirq/pull/5229#issuecomment-1093080151), we should be able to do this in `dev_tools/docs/build_api_docs.py`.\r\n\n", "before_files": [{"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tool to generate external api_docs for Cirq.\n\nIn order to publish to our site, devsite runs two jobs for us: stable and nightly.\nThe stable one downloads the latest cirq release from pypi and uses that to generate the reference\nAPI docs.\nThe nightly one downloads the latest cirq pre-release (pip install cirq --pre) and uses that to\ngenerate the \"nightly diff\".\n\nThis script needs to cater for both of these cases.\n\"\"\"\n\nimport os\nimport types\n\nimport networkx\nfrom absl import app\nfrom absl import flags\nfrom tensorflow_docs.api_generator import doc_controls\nfrom tensorflow_docs.api_generator import generate_lib\nfrom tensorflow_docs.api_generator import public_api\n\nimport cirq\nimport cirq_aqt\nimport cirq_google\nimport cirq_ionq\nimport cirq_pasqal\nimport cirq_rigetti\nimport cirq_web\n\nfrom cirq import _doc\n\nflags.DEFINE_string(\"output_dir\", \"docs/api_docs\", \"Where to output the docs\")\n\nflags.DEFINE_string(\n \"code_url_prefix\",\n \"https://github.com/quantumlib/Cirq/blob/master\",\n \"The url prefix for links to code.\",\n)\n\nflags.DEFINE_bool(\"search_hints\", True, \"Include metadata search hints in the generated files\")\n\nflags.DEFINE_string(\"site_path\", \"reference/python\", \"Path prefix in the _toc.yaml\")\n\nFLAGS = flags.FLAGS\n\n\ndef filter_unwanted_inherited_methods(path, parent, children):\n \"\"\"Filter the unwanted inherited methods.\n\n CircuitDag inherits a lot of methods from `networkx.DiGraph` and `Graph`.\n This filter removes these, as it creates a lot of noise in the API docs.\n \"\"\"\n if parent.__name__ != \"CircuitDag\":\n return children\n\n filtered_children = []\n for name, obj in children:\n if isinstance(obj, types.FunctionType):\n if obj.__module__.startswith('cirq'):\n filtered_children.append((name, obj))\n return filtered_children\n\n\ndef main(unused_argv):\n generate_cirq()\n generate_cirq_google()\n generate_cirq_aqt()\n generate_cirq_ionq()\n generate_cirq_pasqal()\n generate_cirq_rigetti()\n generate_cirq_web()\n\n\ndef generate_cirq():\n doc_generator = generate_lib.DocGenerator(\n root_title=\"Cirq\",\n py_modules=[(\"cirq\", cirq)],\n 
base_dir=os.path.dirname(cirq.__file__),\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-core/cirq\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]\n )\n doc_generator.build(output_dir=FLAGS.output_dir)\n\n\ndef generate_cirq_aqt():\n doc_generator = generate_lib.DocGenerator(\n root_title=\"Cirq-aqt\",\n py_modules=[(\"cirq_aqt\", cirq_aqt)],\n base_dir=os.path.dirname(cirq_aqt.__file__),\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-aqt/cirq_aqt\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]\n )\n\n doc_generator.build(output_dir=FLAGS.output_dir)\n\n\ndef generate_cirq_ionq():\n doc_generator = generate_lib.DocGenerator(\n root_title=\"Cirq_ionq\",\n py_modules=[(\"cirq_ionq\", cirq_ionq)],\n base_dir=os.path.dirname(cirq_ionq.__file__),\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-ionq/cirq_ionq\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]\n )\n\n doc_generator.build(output_dir=FLAGS.output_dir)\n\n\ndef generate_cirq_pasqal():\n doc_generator = generate_lib.DocGenerator(\n root_title=\"Cirq-pasqal\",\n py_modules=[(\"cirq_pasqal\", cirq_pasqal)],\n base_dir=os.path.dirname(cirq_pasqal.__file__),\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-pasqal/cirq_pasqal\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]\n )\n\n doc_generator.build(output_dir=FLAGS.output_dir)\n\n\ndef generate_cirq_rigetti():\n doc_generator = generate_lib.DocGenerator(\n root_title=\"Cirq_rigetti\",\n py_modules=[(\"cirq_rigetti\", cirq_rigetti)],\n base_dir=os.path.dirname(cirq_rigetti.__file__),\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-rigetti/cirq_rigetti\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]\n )\n\n doc_generator.build(output_dir=FLAGS.output_dir)\n\n\ndef generate_cirq_google():\n doc_generator = generate_lib.DocGenerator(\n root_title=\"Cirq-google\",\n py_modules=[(\"cirq_google\", cirq_google)],\n base_dir=os.path.dirname(cirq_google.__file__),\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-google/cirq_google\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],\n private_map={\n # Opt to not build docs for these paths for now since they error.\n 
\"cirq_google.cloud.quantum.QuantumEngineServiceClient\": [\"enums\"],\n \"cirq_google.cloud.quantum_v1alpha1.QuantumEngineServiceClient\": [\"enums\"],\n \"cirq_google.api\": [\"v1\"],\n },\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_generator.build(output_dir=FLAGS.output_dir)\n\n\ndef generate_cirq_web():\n doc_generator = generate_lib.DocGenerator(\n root_title=\"Cirq_web\",\n py_modules=[(\"cirq_web\", cirq_web)],\n base_dir=os.path.dirname(cirq_web.__file__),\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-web/cirq_web\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n callbacks=[public_api.local_definitions_filter, filter_unwanted_inherited_methods],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]\n )\n\n doc_generator.build(output_dir=FLAGS.output_dir)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n", "path": "dev_tools/docs/build_api_docs.py"}], "after_files": [{"content": "# Copyright 2021 The Cirq Developers\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tool to generate external api_docs for Cirq.\n\nIn order to publish to our site, devsite runs two jobs for us: stable and nightly.\nThe stable one downloads the latest cirq release from pypi and uses that to generate the reference\nAPI docs.\nThe nightly one downloads the latest cirq pre-release (pip install cirq --pre) and uses that to\ngenerate the \"nightly diff\".\n\nThis script needs to cater for both of these cases.\n\"\"\"\n\nimport os\nimport types\n\nimport networkx\nfrom absl import app\nfrom absl import flags\nfrom tensorflow_docs.api_generator import doc_controls\nfrom tensorflow_docs.api_generator import generate_lib\nfrom tensorflow_docs.api_generator import public_api\n\nimport cirq\nimport cirq_aqt\nimport cirq_google\nimport cirq_ionq\nimport cirq_pasqal\nimport cirq_rigetti\nimport cirq_web\n\nfrom cirq import _doc\n\nflags.DEFINE_string(\"output_dir\", \"docs/api_docs\", \"Where to output the docs\")\n\nflags.DEFINE_string(\n \"code_url_prefix\",\n \"https://github.com/quantumlib/Cirq/blob/master\",\n \"The url prefix for links to code.\",\n)\n\nflags.DEFINE_bool(\"search_hints\", True, \"Include metadata search hints in the generated files\")\n\nflags.DEFINE_string(\"site_path\", \"reference/python\", \"Path prefix in the _toc.yaml\")\n\nFLAGS = flags.FLAGS\n\n\ndef filter_unwanted_inherited_methods(path, parent, children):\n \"\"\"Filter the unwanted inherited methods.\n\n CircuitDag inherits a lot of methods from `networkx.DiGraph` and `Graph`.\n This filter removes these, as it creates a lot of noise in the API docs.\n \"\"\"\n if parent.__name__ != \"CircuitDag\":\n return children\n\n filtered_children = []\n for name, obj in children:\n if isinstance(obj, types.FunctionType):\n if obj.__module__.startswith('cirq'):\n filtered_children.append((name, obj))\n return 
filtered_children\n\n\ndef filter_type_checking(path, parent, children):\n filtered_children = []\n for name, obj in children:\n if name != 'TYPE_CHECKING':\n filtered_children.append((name, obj))\n return filtered_children\n\n\ndef main(unused_argv):\n generate_cirq()\n generate_cirq_google()\n generate_cirq_aqt()\n generate_cirq_ionq()\n generate_cirq_pasqal()\n generate_cirq_rigetti()\n generate_cirq_web()\n\n\ndef generate_cirq():\n doc_generator = generate_lib.DocGenerator(\n root_title=\"Cirq\",\n py_modules=[(\"cirq\", cirq)],\n base_dir=os.path.dirname(cirq.__file__),\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-core/cirq\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n callbacks=[\n public_api.local_definitions_filter,\n filter_unwanted_inherited_methods,\n filter_type_checking,\n ],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]\n )\n doc_generator.build(output_dir=FLAGS.output_dir)\n\n\ndef generate_cirq_aqt():\n doc_generator = generate_lib.DocGenerator(\n root_title=\"Cirq-aqt\",\n py_modules=[(\"cirq_aqt\", cirq_aqt)],\n base_dir=os.path.dirname(cirq_aqt.__file__),\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-aqt/cirq_aqt\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n callbacks=[\n public_api.local_definitions_filter,\n filter_unwanted_inherited_methods,\n filter_type_checking,\n ],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]\n )\n\n doc_generator.build(output_dir=FLAGS.output_dir)\n\n\ndef generate_cirq_ionq():\n doc_generator = generate_lib.DocGenerator(\n root_title=\"Cirq_ionq\",\n py_modules=[(\"cirq_ionq\", cirq_ionq)],\n base_dir=os.path.dirname(cirq_ionq.__file__),\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-ionq/cirq_ionq\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n callbacks=[\n public_api.local_definitions_filter,\n filter_unwanted_inherited_methods,\n filter_type_checking,\n ],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]\n )\n\n doc_generator.build(output_dir=FLAGS.output_dir)\n\n\ndef generate_cirq_pasqal():\n doc_generator = generate_lib.DocGenerator(\n root_title=\"Cirq-pasqal\",\n py_modules=[(\"cirq_pasqal\", cirq_pasqal)],\n base_dir=os.path.dirname(cirq_pasqal.__file__),\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-pasqal/cirq_pasqal\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n callbacks=[\n public_api.local_definitions_filter,\n filter_unwanted_inherited_methods,\n filter_type_checking,\n ],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]\n )\n\n doc_generator.build(output_dir=FLAGS.output_dir)\n\n\ndef generate_cirq_rigetti():\n doc_generator = generate_lib.DocGenerator(\n root_title=\"Cirq_rigetti\",\n py_modules=[(\"cirq_rigetti\", cirq_rigetti)],\n base_dir=os.path.dirname(cirq_rigetti.__file__),\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-rigetti/cirq_rigetti\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n callbacks=[\n public_api.local_definitions_filter,\n filter_unwanted_inherited_methods,\n filter_type_checking,\n ],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n 
doc_controls.decorate_all_class_attributes(\n doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]\n )\n\n doc_generator.build(output_dir=FLAGS.output_dir)\n\n\ndef generate_cirq_google():\n doc_generator = generate_lib.DocGenerator(\n root_title=\"Cirq-google\",\n py_modules=[(\"cirq_google\", cirq_google)],\n base_dir=os.path.dirname(cirq_google.__file__),\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-google/cirq_google\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n callbacks=[\n public_api.local_definitions_filter,\n filter_unwanted_inherited_methods,\n filter_type_checking,\n ],\n private_map={\n # Opt to not build docs for these paths for now since they error.\n \"cirq_google.cloud.quantum.QuantumEngineServiceClient\": [\"enums\"],\n \"cirq_google.cloud.quantum_v1alpha1.QuantumEngineServiceClient\": [\"enums\"],\n \"cirq_google.api\": [\"v1\"],\n },\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_generator.build(output_dir=FLAGS.output_dir)\n\n\ndef generate_cirq_web():\n doc_generator = generate_lib.DocGenerator(\n root_title=\"Cirq_web\",\n py_modules=[(\"cirq_web\", cirq_web)],\n base_dir=os.path.dirname(cirq_web.__file__),\n code_url_prefix=FLAGS.code_url_prefix + \"/cirq-web/cirq_web\",\n search_hints=FLAGS.search_hints,\n site_path=FLAGS.site_path,\n callbacks=[\n public_api.local_definitions_filter,\n filter_unwanted_inherited_methods,\n filter_type_checking,\n ],\n extra_docs=_doc.RECORDED_CONST_DOCS,\n )\n doc_controls.decorate_all_class_attributes(\n doc_controls.do_not_doc_inheritable, networkx.DiGraph, skip=[]\n )\n\n doc_generator.build(output_dir=FLAGS.output_dir)\n\n\nif __name__ == \"__main__\":\n app.run(main)\n", "path": "dev_tools/docs/build_api_docs.py"}]} | 2,754 | 1,006 |
gh_patches_debug_23108 | rasdani/github-patches | git_diff | keras-team__autokeras-568 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Evaluation criteria for MLP
Are there any evaluation criteria for the MLP module in Autokeras?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `autokeras/net_module.py`
Content:
```
1 from functools import reduce
2
3 import torch
4 import numpy as np
5
6 import os
7 import time
8
9 from autokeras.constant import Constant
10 from autokeras.search import BayesianSearcher, train
11
12 from autokeras.utils import pickle_to_file, rand_temp_folder_generator, ensure_dir
13 from autokeras.nn.generator import CnnGenerator, MlpGenerator, ResNetGenerator, DenseNetGenerator
14
15
16 class NetworkModule:
17 """ Class to create a network module.
18
19 Attributes:
20 loss: A function taking two parameters, the predictions and the ground truth.
21 metric: An instance of the Metric subclasses.
22 searcher_args: A dictionary containing the parameters for the searcher's __init__ function.
23 searcher: An instance of the Searcher class.
24 path: A string. The path to the directory to save the searcher.
25 verbose: A boolean. Setting it to true prints to stdout.
26 generators: A list of instances of the NetworkGenerator class or its subclasses.
27 search_type: A constant denoting the type of hyperparameter search algorithm that must be used.
28 """
29
30 def __init__(self, loss, metric, searcher_args=None, path=None, verbose=False, search_type=BayesianSearcher):
31 self.searcher_args = searcher_args if searcher_args is not None else {}
32 self.searcher = None
33 self.path = path if path is not None else rand_temp_folder_generator()
34 ensure_dir(self.path)
35 if verbose:
36 print('Saving Directory:', self.path)
37 self.verbose = verbose
38 self.loss = loss
39 self.metric = metric
40 self.generators = []
41 self.search_type = search_type
42
43 def fit(self, n_output_node, input_shape, train_data, test_data, time_limit=24 * 60 * 60):
44 """ Search the best network.
45
46 Args:
47 n_output_node: A integer value represent the number of output node in the final layer.
48 input_shape: A tuple to express the shape of every train entry. For example,
49 MNIST dataset would be (28,28,1).
50 train_data: A PyTorch DataLoader instance representing the training data.
51 test_data: A PyTorch DataLoader instance representing the testing data.
52 time_limit: A integer value represents the time limit on searching for models.
53 """
54 # Create the searcher and save on disk
55
56 if not self.searcher:
57 input_shape = input_shape[1:]
58 self.searcher_args['n_output_node'] = n_output_node
59 self.searcher_args['input_shape'] = input_shape
60 self.searcher_args['path'] = self.path
61 self.searcher_args['metric'] = self.metric
62 self.searcher_args['loss'] = self.loss
63 self.searcher_args['generators'] = self.generators
64 self.searcher_args['verbose'] = self.verbose
65 pickle_to_file(self, os.path.join(self.path, 'module'))
66 self.searcher = self.search_type(**self.searcher_args)
67
68 start_time = time.time()
69 time_remain = time_limit
70 try:
71 while time_remain > 0:
72 self.searcher.search(train_data, test_data, int(time_remain))
73 pickle_to_file(self, os.path.join(self.path, 'module'))
74 if len(self.searcher.history) >= Constant.MAX_MODEL_NUM:
75 break
76 time_elapsed = time.time() - start_time
77 time_remain = time_limit - time_elapsed
78 # if no search executed during the time_limit, then raise an error
79 if time_remain <= 0:
80 raise TimeoutError
81 except TimeoutError:
82 if len(self.searcher.history) == 0:
83 raise TimeoutError("Search Time too short. No model was found during the search time.")
84 elif self.verbose:
85 print('Time is out.')
86
87 def final_fit(self, train_data, test_data, trainer_args=None, retrain=False):
88 """Final training after found the best architecture.
89
90 Args:
91 train_data: A DataLoader instance representing the training data.
92 test_data: A DataLoader instance representing the testing data.
93 trainer_args: A dictionary containing the parameters of the ModelTrainer constructor.
94 retrain: A boolean of whether reinitialize the weights of the model.
95 """
96 graph = self.searcher.load_best_model()
97
98 if retrain:
99 graph.weighted = False
100 _, _1, graph = train(None, graph,
101 train_data,
102 test_data,
103 trainer_args,
104 self.metric,
105 self.loss,
106 self.verbose,
107 self.path)
108 self.searcher.replace_model(graph, self.searcher.get_best_model_id())
109 pickle_to_file(self, os.path.join(self.path, 'module'))
110
111 @property
112 def best_model(self):
113 return self.searcher.load_best_model()
114
115 def predict(self, test_loader):
116 model = self.best_model.produce_model()
117 model.eval()
118
119 outputs = []
120 with torch.no_grad():
121 for index, inputs in enumerate(test_loader):
122 outputs.append(model(inputs).numpy())
123 output = reduce(lambda x, y: np.concatenate((x, y)), outputs)
124 return output
125
126
127 class CnnModule(NetworkModule):
128 """ Class to create a CNN module."""
129
130 def __init__(self, loss, metric, searcher_args=None, path=None, verbose=False,
131 search_type=BayesianSearcher):
132 super(CnnModule, self).__init__(loss, metric, searcher_args, path, verbose, search_type)
133 self.generators.append(CnnGenerator)
134 self.generators.append(ResNetGenerator)
135 self.generators.append(DenseNetGenerator)
136
137
138 class MlpModule(NetworkModule):
139 """ Class to create an MLP module."""
140
141 def __init__(self, loss, metric, searcher_args=None, path=None, verbose=False):
142 super(MlpModule, self).__init__(loss, metric, searcher_args, path, verbose)
143 self.generators.extend([MlpGenerator] * 2)
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/autokeras/net_module.py b/autokeras/net_module.py
--- a/autokeras/net_module.py
+++ b/autokeras/net_module.py
@@ -11,6 +11,7 @@
from autokeras.utils import pickle_to_file, rand_temp_folder_generator, ensure_dir
from autokeras.nn.generator import CnnGenerator, MlpGenerator, ResNetGenerator, DenseNetGenerator
+from autokeras.utils import get_device
class NetworkModule:
@@ -123,6 +124,24 @@
output = reduce(lambda x, y: np.concatenate((x, y)), outputs)
return output
+ def evaluate(self, test_data):
+ """Evaluate the performance of the best architecture in terms of the loss.
+
+ Args:
+ test_data: A DataLoader instance representing the testing data.
+ """
+ model = self.best_model.produce_model()
+ model.eval()
+ device = get_device()
+ target, prediction = [], []
+
+ with torch.no_grad():
+ for _, (x, y) in enumerate(test_data):
+ x, y = x.to(device), y.to(device)
+ prediction.append(model(x))
+ target.append(y)
+ return self.metric().compute(prediction, target)
+
class CnnModule(NetworkModule):
""" Class to create a CNN module."""
| {"golden_diff": "diff --git a/autokeras/net_module.py b/autokeras/net_module.py\n--- a/autokeras/net_module.py\n+++ b/autokeras/net_module.py\n@@ -11,6 +11,7 @@\n \n from autokeras.utils import pickle_to_file, rand_temp_folder_generator, ensure_dir\n from autokeras.nn.generator import CnnGenerator, MlpGenerator, ResNetGenerator, DenseNetGenerator\n+from autokeras.utils import get_device\n \n \n class NetworkModule:\n@@ -123,6 +124,24 @@\n output = reduce(lambda x, y: np.concatenate((x, y)), outputs)\n return output\n \n+ def evaluate(self, test_data):\n+ \"\"\"Evaluate the performance of the best architecture in terms of the loss.\n+\n+ Args:\n+ test_data: A DataLoader instance representing the testing data.\n+ \"\"\"\n+ model = self.best_model.produce_model()\n+ model.eval()\n+ device = get_device()\n+ target, prediction = [], []\n+\n+ with torch.no_grad():\n+ for _, (x, y) in enumerate(test_data):\n+ x, y = x.to(device), y.to(device)\n+ prediction.append(model(x))\n+ target.append(y)\n+ return self.metric().compute(prediction, target)\n+\n \n class CnnModule(NetworkModule):\n \"\"\" Class to create a CNN module.\"\"\"\n", "issue": "Evaluation criteria for MLP\nAre there any evaluation criterias for MLP module in Autokeras? \r\n\n", "before_files": [{"content": "from functools import reduce\n\nimport torch\nimport numpy as np\n\nimport os\nimport time\n\nfrom autokeras.constant import Constant\nfrom autokeras.search import BayesianSearcher, train\n\nfrom autokeras.utils import pickle_to_file, rand_temp_folder_generator, ensure_dir\nfrom autokeras.nn.generator import CnnGenerator, MlpGenerator, ResNetGenerator, DenseNetGenerator\n\n\nclass NetworkModule:\n \"\"\" Class to create a network module.\n\n Attributes:\n loss: A function taking two parameters, the predictions and the ground truth.\n metric: An instance of the Metric subclasses.\n searcher_args: A dictionary containing the parameters for the searcher's __init__ function.\n searcher: An instance of the Searcher class.\n path: A string. The path to the directory to save the searcher.\n verbose: A boolean. Setting it to true prints to stdout.\n generators: A list of instances of the NetworkGenerator class or its subclasses.\n search_type: A constant denoting the type of hyperparameter search algorithm that must be used.\n \"\"\"\n\n def __init__(self, loss, metric, searcher_args=None, path=None, verbose=False, search_type=BayesianSearcher):\n self.searcher_args = searcher_args if searcher_args is not None else {}\n self.searcher = None\n self.path = path if path is not None else rand_temp_folder_generator()\n ensure_dir(self.path)\n if verbose:\n print('Saving Directory:', self.path)\n self.verbose = verbose\n self.loss = loss\n self.metric = metric\n self.generators = []\n self.search_type = search_type\n\n def fit(self, n_output_node, input_shape, train_data, test_data, time_limit=24 * 60 * 60):\n \"\"\" Search the best network.\n\n Args:\n n_output_node: A integer value represent the number of output node in the final layer.\n input_shape: A tuple to express the shape of every train entry. 
For example,\n MNIST dataset would be (28,28,1).\n train_data: A PyTorch DataLoader instance representing the training data.\n test_data: A PyTorch DataLoader instance representing the testing data.\n time_limit: A integer value represents the time limit on searching for models.\n \"\"\"\n # Create the searcher and save on disk\n\n if not self.searcher:\n input_shape = input_shape[1:]\n self.searcher_args['n_output_node'] = n_output_node\n self.searcher_args['input_shape'] = input_shape\n self.searcher_args['path'] = self.path\n self.searcher_args['metric'] = self.metric\n self.searcher_args['loss'] = self.loss\n self.searcher_args['generators'] = self.generators\n self.searcher_args['verbose'] = self.verbose\n pickle_to_file(self, os.path.join(self.path, 'module'))\n self.searcher = self.search_type(**self.searcher_args)\n\n start_time = time.time()\n time_remain = time_limit\n try:\n while time_remain > 0:\n self.searcher.search(train_data, test_data, int(time_remain))\n pickle_to_file(self, os.path.join(self.path, 'module'))\n if len(self.searcher.history) >= Constant.MAX_MODEL_NUM:\n break\n time_elapsed = time.time() - start_time\n time_remain = time_limit - time_elapsed\n # if no search executed during the time_limit, then raise an error\n if time_remain <= 0:\n raise TimeoutError\n except TimeoutError:\n if len(self.searcher.history) == 0:\n raise TimeoutError(\"Search Time too short. No model was found during the search time.\")\n elif self.verbose:\n print('Time is out.')\n\n def final_fit(self, train_data, test_data, trainer_args=None, retrain=False):\n \"\"\"Final training after found the best architecture.\n\n Args:\n train_data: A DataLoader instance representing the training data.\n test_data: A DataLoader instance representing the testing data.\n trainer_args: A dictionary containing the parameters of the ModelTrainer constructor.\n retrain: A boolean of whether reinitialize the weights of the model.\n \"\"\"\n graph = self.searcher.load_best_model()\n\n if retrain:\n graph.weighted = False\n _, _1, graph = train(None, graph,\n train_data,\n test_data,\n trainer_args,\n self.metric,\n self.loss,\n self.verbose,\n self.path)\n self.searcher.replace_model(graph, self.searcher.get_best_model_id())\n pickle_to_file(self, os.path.join(self.path, 'module'))\n\n @property\n def best_model(self):\n return self.searcher.load_best_model()\n\n def predict(self, test_loader):\n model = self.best_model.produce_model()\n model.eval()\n\n outputs = []\n with torch.no_grad():\n for index, inputs in enumerate(test_loader):\n outputs.append(model(inputs).numpy())\n output = reduce(lambda x, y: np.concatenate((x, y)), outputs)\n return output\n\n\nclass CnnModule(NetworkModule):\n \"\"\" Class to create a CNN module.\"\"\"\n\n def __init__(self, loss, metric, searcher_args=None, path=None, verbose=False,\n search_type=BayesianSearcher):\n super(CnnModule, self).__init__(loss, metric, searcher_args, path, verbose, search_type)\n self.generators.append(CnnGenerator)\n self.generators.append(ResNetGenerator)\n self.generators.append(DenseNetGenerator)\n\n\nclass MlpModule(NetworkModule):\n \"\"\" Class to create an MLP module.\"\"\"\n\n def __init__(self, loss, metric, searcher_args=None, path=None, verbose=False):\n super(MlpModule, self).__init__(loss, metric, searcher_args, path, verbose)\n self.generators.extend([MlpGenerator] * 2)\n", "path": "autokeras/net_module.py"}], "after_files": [{"content": "from functools import reduce\n\nimport torch\nimport numpy as np\n\nimport os\nimport 
time\n\nfrom autokeras.constant import Constant\nfrom autokeras.search import BayesianSearcher, train\n\nfrom autokeras.utils import pickle_to_file, rand_temp_folder_generator, ensure_dir\nfrom autokeras.nn.generator import CnnGenerator, MlpGenerator, ResNetGenerator, DenseNetGenerator\nfrom autokeras.utils import get_device\n\n\nclass NetworkModule:\n \"\"\" Class to create a network module.\n\n Attributes:\n loss: A function taking two parameters, the predictions and the ground truth.\n metric: An instance of the Metric subclasses.\n searcher_args: A dictionary containing the parameters for the searcher's __init__ function.\n searcher: An instance of the Searcher class.\n path: A string. The path to the directory to save the searcher.\n verbose: A boolean. Setting it to true prints to stdout.\n generators: A list of instances of the NetworkGenerator class or its subclasses.\n search_type: A constant denoting the type of hyperparameter search algorithm that must be used.\n \"\"\"\n\n def __init__(self, loss, metric, searcher_args=None, path=None, verbose=False, search_type=BayesianSearcher):\n self.searcher_args = searcher_args if searcher_args is not None else {}\n self.searcher = None\n self.path = path if path is not None else rand_temp_folder_generator()\n ensure_dir(self.path)\n if verbose:\n print('Saving Directory:', self.path)\n self.verbose = verbose\n self.loss = loss\n self.metric = metric\n self.generators = []\n self.search_type = search_type\n\n def fit(self, n_output_node, input_shape, train_data, test_data, time_limit=24 * 60 * 60):\n \"\"\" Search the best network.\n\n Args:\n n_output_node: A integer value represent the number of output node in the final layer.\n input_shape: A tuple to express the shape of every train entry. For example,\n MNIST dataset would be (28,28,1).\n train_data: A PyTorch DataLoader instance representing the training data.\n test_data: A PyTorch DataLoader instance representing the testing data.\n time_limit: A integer value represents the time limit on searching for models.\n \"\"\"\n # Create the searcher and save on disk\n\n if not self.searcher:\n input_shape = input_shape[1:]\n self.searcher_args['n_output_node'] = n_output_node\n self.searcher_args['input_shape'] = input_shape\n self.searcher_args['path'] = self.path\n self.searcher_args['metric'] = self.metric\n self.searcher_args['loss'] = self.loss\n self.searcher_args['generators'] = self.generators\n self.searcher_args['verbose'] = self.verbose\n pickle_to_file(self, os.path.join(self.path, 'module'))\n self.searcher = self.search_type(**self.searcher_args)\n\n start_time = time.time()\n time_remain = time_limit\n try:\n while time_remain > 0:\n self.searcher.search(train_data, test_data, int(time_remain))\n pickle_to_file(self, os.path.join(self.path, 'module'))\n if len(self.searcher.history) >= Constant.MAX_MODEL_NUM:\n break\n time_elapsed = time.time() - start_time\n time_remain = time_limit - time_elapsed\n # if no search executed during the time_limit, then raise an error\n if time_remain <= 0:\n raise TimeoutError\n except TimeoutError:\n if len(self.searcher.history) == 0:\n raise TimeoutError(\"Search Time too short. 
No model was found during the search time.\")\n elif self.verbose:\n print('Time is out.')\n\n def final_fit(self, train_data, test_data, trainer_args=None, retrain=False):\n \"\"\"Final training after found the best architecture.\n\n Args:\n train_data: A DataLoader instance representing the training data.\n test_data: A DataLoader instance representing the testing data.\n trainer_args: A dictionary containing the parameters of the ModelTrainer constructor.\n retrain: A boolean of whether reinitialize the weights of the model.\n \"\"\"\n graph = self.searcher.load_best_model()\n\n if retrain:\n graph.weighted = False\n _, _1, graph = train(None, graph,\n train_data,\n test_data,\n trainer_args,\n self.metric,\n self.loss,\n self.verbose,\n self.path)\n self.searcher.replace_model(graph, self.searcher.get_best_model_id())\n pickle_to_file(self, os.path.join(self.path, 'module'))\n\n @property\n def best_model(self):\n return self.searcher.load_best_model()\n\n def predict(self, test_loader):\n model = self.best_model.produce_model()\n model.eval()\n\n outputs = []\n with torch.no_grad():\n for index, inputs in enumerate(test_loader):\n outputs.append(model(inputs).numpy())\n output = reduce(lambda x, y: np.concatenate((x, y)), outputs)\n return output\n\n def evaluate(self, test_data):\n \"\"\"Evaluate the performance of the best architecture in terms of the loss.\n\n Args:\n test_data: A DataLoader instance representing the testing data.\n \"\"\"\n model = self.best_model.produce_model()\n model.eval()\n device = get_device()\n target, prediction = [], []\n\n with torch.no_grad():\n for _, (x, y) in enumerate(test_data):\n x, y = x.to(device), y.to(device)\n prediction.append(model(x))\n target.append(y)\n return self.metric().compute(prediction, target)\n\n\nclass CnnModule(NetworkModule):\n \"\"\" Class to create a CNN module.\"\"\"\n\n def __init__(self, loss, metric, searcher_args=None, path=None, verbose=False,\n search_type=BayesianSearcher):\n super(CnnModule, self).__init__(loss, metric, searcher_args, path, verbose, search_type)\n self.generators.append(CnnGenerator)\n self.generators.append(ResNetGenerator)\n self.generators.append(DenseNetGenerator)\n\n\nclass MlpModule(NetworkModule):\n \"\"\" Class to create an MLP module.\"\"\"\n\n def __init__(self, loss, metric, searcher_args=None, path=None, verbose=False):\n super(MlpModule, self).__init__(loss, metric, searcher_args, path, verbose)\n self.generators.extend([MlpGenerator] * 2)\n", "path": "autokeras/net_module.py"}]} | 1,870 | 301 |
gh_patches_debug_23667 | rasdani/github-patches | git_diff | pandas-dev__pandas-6803 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
numexpr 2.3.1 error with pandas 0.13.1
I just installed numexpr 2.3.1 with pandas 0.13.1 and got the following error:
File "C:\Python27\lib\site-packages\pandas\core\ops.py", line 496, in wrapper
arr = na_op(lvalues, rvalues)
File "C:\Python27\lib\site-packages\pandas\core\ops.py", line 443, in na_op
raise_on_error=True, **eval_kwargs)
File "C:\Python27\lib\site-packages\pandas\computation\expressions.py", line 176, in evaluate
**eval_kwargs)
File "C:\Python27\lib\site-packages\pandas\computation\expressions.py", line 104, in _evaluate_numexpr
**eval_kwargs)
File "C:\Python27\lib\site-packages\numexpr\necompiler.py", line 738, in evaluate
NumExpr(ex, signature, **context)
File "C:\Python27\lib\site-packages\numexpr\necompiler.py", line 554, in NumExpr
precompile(ex, signature, context)
File "C:\Python27\lib\site-packages\numexpr\necompiler.py", line 498, in precompile
ast = typeCompileAst(ast)
File "C:\Python27\lib\site-packages\numexpr\necompiler.py", line 163, in typeCompileAst
% (ast.value + '_' + retsig+basesig))
NotImplementedError: couldn't find matching opcode for 'mul_bbb'
--- END ISSUE ---
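For reference, a minimal sketch (not taken from the report) of the kind of operation that produces this traceback: arithmetic between two large boolean frames is routed through numexpr, and numexpr 2.3.1 has no bool-times-bool opcode.

```python
# Assumes pandas 0.13.1 with numexpr 2.3.1 installed; illustrative only.
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.rand(200, 100) > 0.5)   # bool dtype, >10k elements so numexpr is used
df * df   # NotImplementedError: couldn't find matching opcode for 'mul_bbb'
```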
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandas/computation/expressions.py`
Content:
```
1 """
2 Expressions
3 -----------
4
5 Offer fast expression evaluation through numexpr
6
7 """
8
9 import numpy as np
10 from pandas.core.common import _values_from_object
11 from distutils.version import LooseVersion
12
13 try:
14 import numexpr as ne
15 _NUMEXPR_INSTALLED = ne.__version__ >= LooseVersion('2.0')
16 except ImportError: # pragma: no cover
17 _NUMEXPR_INSTALLED = False
18
19 _TEST_MODE = None
20 _TEST_RESULT = None
21 _USE_NUMEXPR = _NUMEXPR_INSTALLED
22 _evaluate = None
23 _where = None
24
25 # the set of dtypes that we will allow pass to numexpr
26 _ALLOWED_DTYPES = {
27 'evaluate': set(['int64', 'int32', 'float64', 'float32', 'bool']),
28 'where': set(['int64', 'float64', 'bool'])
29 }
30
31 # the minimum prod shape that we will use numexpr
32 _MIN_ELEMENTS = 10000
33
34
35 def set_use_numexpr(v=True):
36 # set/unset to use numexpr
37 global _USE_NUMEXPR
38 if _NUMEXPR_INSTALLED:
39 _USE_NUMEXPR = v
40
41 # choose what we are going to do
42 global _evaluate, _where
43 if not _USE_NUMEXPR:
44 _evaluate = _evaluate_standard
45 _where = _where_standard
46 else:
47 _evaluate = _evaluate_numexpr
48 _where = _where_numexpr
49
50
51 def set_numexpr_threads(n=None):
52 # if we are using numexpr, set the threads to n
53 # otherwise reset
54 if _NUMEXPR_INSTALLED and _USE_NUMEXPR:
55 if n is None:
56 n = ne.detect_number_of_cores()
57 ne.set_num_threads(n)
58
59
60 def _evaluate_standard(op, op_str, a, b, raise_on_error=True, **eval_kwargs):
61 """ standard evaluation """
62 if _TEST_MODE:
63 _store_test_result(False)
64 return op(a, b)
65
66
67 def _can_use_numexpr(op, op_str, a, b, dtype_check):
68 """ return a boolean if we WILL be using numexpr """
69 if op_str is not None:
70
71 # required min elements (otherwise we are adding overhead)
72 if np.prod(a.shape) > _MIN_ELEMENTS:
73
74 # check for dtype compatiblity
75 dtypes = set()
76 for o in [a, b]:
77 if hasattr(o, 'get_dtype_counts'):
78 s = o.get_dtype_counts()
79 if len(s) > 1:
80 return False
81 dtypes |= set(s.index)
82 elif isinstance(o, np.ndarray):
83 dtypes |= set([o.dtype.name])
84
85 # allowed are a superset
86 if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:
87 return True
88
89 return False
90
91
92 def _evaluate_numexpr(op, op_str, a, b, raise_on_error=False, truediv=True,
93 **eval_kwargs):
94 result = None
95
96 if _can_use_numexpr(op, op_str, a, b, 'evaluate'):
97 try:
98 a_value = getattr(a, "values", a)
99 b_value = getattr(b, "values", b)
100 result = ne.evaluate('a_value %s b_value' % op_str,
101 local_dict={'a_value': a_value,
102 'b_value': b_value},
103 casting='safe', truediv=truediv,
104 **eval_kwargs)
105 except ValueError as detail:
106 if 'unknown type object' in str(detail):
107 pass
108 except Exception as detail:
109 if raise_on_error:
110 raise
111
112 if _TEST_MODE:
113 _store_test_result(result is not None)
114
115 if result is None:
116 result = _evaluate_standard(op, op_str, a, b, raise_on_error)
117
118 return result
119
120
121 def _where_standard(cond, a, b, raise_on_error=True):
122 return np.where(_values_from_object(cond), _values_from_object(a),
123 _values_from_object(b))
124
125
126 def _where_numexpr(cond, a, b, raise_on_error=False):
127 result = None
128
129 if _can_use_numexpr(None, 'where', a, b, 'where'):
130
131 try:
132 cond_value = getattr(cond, 'values', cond)
133 a_value = getattr(a, 'values', a)
134 b_value = getattr(b, 'values', b)
135 result = ne.evaluate('where(cond_value, a_value, b_value)',
136 local_dict={'cond_value': cond_value,
137 'a_value': a_value,
138 'b_value': b_value},
139 casting='safe')
140 except ValueError as detail:
141 if 'unknown type object' in str(detail):
142 pass
143 except Exception as detail:
144 if raise_on_error:
145 raise TypeError(str(detail))
146
147 if result is None:
148 result = _where_standard(cond, a, b, raise_on_error)
149
150 return result
151
152
153 # turn myself on
154 set_use_numexpr(True)
155
156
157 def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True,
158 **eval_kwargs):
159 """ evaluate and return the expression of the op on a and b
160
161 Parameters
162 ----------
163
164 op : the actual operand
165 op_str: the string version of the op
166 a : left operand
167 b : right operand
168 raise_on_error : pass the error to the higher level if indicated
169 (default is False), otherwise evaluate the op with and
170 return the results
171 use_numexpr : whether to try to use numexpr (default True)
172 """
173
174 if use_numexpr:
175 return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error,
176 **eval_kwargs)
177 return _evaluate_standard(op, op_str, a, b, raise_on_error=raise_on_error)
178
179
180 def where(cond, a, b, raise_on_error=False, use_numexpr=True):
181 """ evaluate the where condition cond on a and b
182
183 Parameters
184 ----------
185
186 cond : a boolean array
187 a : return if cond is True
188 b : return if cond is False
189 raise_on_error : pass the error to the higher level if indicated
190 (default is False), otherwise evaluate the op with and
191 return the results
192 use_numexpr : whether to try to use numexpr (default True)
193 """
194
195 if use_numexpr:
196 return _where(cond, a, b, raise_on_error=raise_on_error)
197 return _where_standard(cond, a, b, raise_on_error=raise_on_error)
198
199
200 def set_test_mode(v=True):
201 """
202 Keeps track of whether numexpr was used. Stores an additional ``True``
203 for every successful use of evaluate with numexpr since the last
204 ``get_test_result``
205 """
206 global _TEST_MODE, _TEST_RESULT
207 _TEST_MODE = v
208 _TEST_RESULT = []
209
210
211 def _store_test_result(used_numexpr):
212 global _TEST_RESULT
213 if used_numexpr:
214 _TEST_RESULT.append(used_numexpr)
215
216
217 def get_test_result():
218 """get test result and reset test_results"""
219 global _TEST_RESULT
220 res = _TEST_RESULT
221 _TEST_RESULT = []
222 return res
223
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pandas/computation/expressions.py b/pandas/computation/expressions.py
--- a/pandas/computation/expressions.py
+++ b/pandas/computation/expressions.py
@@ -154,6 +154,20 @@
set_use_numexpr(True)
+def _has_bool_dtype(x):
+ try:
+ return x.dtype == bool
+ except AttributeError:
+ return 'bool' in x.blocks
+
+
+def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('+', '*', '-', '/',
+ '//', '**'))):
+ if op_str in not_allowed and _has_bool_dtype(a) and _has_bool_dtype(b):
+ raise NotImplementedError("operator %r not implemented for bool "
+ "dtypes" % op_str)
+
+
def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True,
**eval_kwargs):
""" evaluate and return the expression of the op on a and b
@@ -170,7 +184,7 @@
return the results
use_numexpr : whether to try to use numexpr (default True)
"""
-
+ _bool_arith_check(op_str, a, b)
if use_numexpr:
return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error,
**eval_kwargs)
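A short sketch of the behaviour the patch aims for (illustrative, assuming the patched module): boolean operands combined with an arithmetic operator are rejected up front with a clear message instead of reaching numexpr.

```python
# Illustrative only; uses the patched pandas.computation.expressions module.
import operator
import numpy as np
from pandas.computation import expressions

a = np.random.rand(200, 100) > 0.5
b = np.random.rand(200, 100) > 0.5
expressions.evaluate(operator.mul, '*', a, b)
# NotImplementedError: operator '*' not implemented for bool dtypes
```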
| {"golden_diff": "diff --git a/pandas/computation/expressions.py b/pandas/computation/expressions.py\n--- a/pandas/computation/expressions.py\n+++ b/pandas/computation/expressions.py\n@@ -154,6 +154,20 @@\n set_use_numexpr(True)\n \n \n+def _has_bool_dtype(x):\n+ try:\n+ return x.dtype == bool\n+ except AttributeError:\n+ return 'bool' in x.blocks\n+\n+\n+def _bool_arith_check(op_str, a, b, not_allowed=frozenset(('+', '*', '-', '/',\n+ '//', '**'))):\n+ if op_str in not_allowed and _has_bool_dtype(a) and _has_bool_dtype(b):\n+ raise NotImplementedError(\"operator %r not implemented for bool \"\n+ \"dtypes\" % op_str)\n+\n+\n def evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True,\n **eval_kwargs):\n \"\"\" evaluate and return the expression of the op on a and b\n@@ -170,7 +184,7 @@\n return the results\n use_numexpr : whether to try to use numexpr (default True)\n \"\"\"\n-\n+ _bool_arith_check(op_str, a, b)\n if use_numexpr:\n return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error,\n **eval_kwargs)\n", "issue": "numexpr 2.3.1 error with pandas 0.13.1\nI just installed numexpr 2.3.1 with pandas 0.13.1 and got the following error:\n\n File \"C:\\Python27\\lib\\site-packages\\pandas\\core\\ops.py\", line 496, in wrapper\n arr = na_op(lvalues, rvalues)\n File \"C:\\Python27\\lib\\site-packages\\pandas\\core\\ops.py\", line 443, in na_op\n raise_on_error=True, *_eval_kwargs)\n File \"C:\\Python27\\lib\\site-packages\\pandas\\computation\\expressions.py\", line 176, in evaluate\n *_eval_kwargs)\n File \"C:\\Python27\\lib\\site-packages\\pandas\\computation\\expressions.py\", line 104, in _evaluate_numexpr\n *_eval_kwargs)\n File \"C:\\Python27\\lib\\site-packages\\numexpr\\necompiler.py\", line 738, in evaluate\n NumExpr(ex, signature, *_context)\n File \"C:\\Python27\\lib\\site-packages\\numexpr\\necompiler.py\", line 554, in NumExpr\n precompile(ex, signature, context)\n File \"C:\\Python27\\lib\\site-packages\\numexpr\\necompiler.py\", line 498, in precompile\n ast = typeCompileAst(ast)\n File \"C:\\Python27\\lib\\site-packages\\numexpr\\necompiler.py\", line 163, in typeCompileAst\n % (ast.value + '_' + retsig+basesig))\nNotImplementedError: couldn't find matching opcode for 'mul_bbb'\n\n", "before_files": [{"content": "\"\"\"\nExpressions\n-----------\n\nOffer fast expression evaluation through numexpr\n\n\"\"\"\n\nimport numpy as np\nfrom pandas.core.common import _values_from_object\nfrom distutils.version import LooseVersion\n\ntry:\n import numexpr as ne\n _NUMEXPR_INSTALLED = ne.__version__ >= LooseVersion('2.0')\nexcept ImportError: # pragma: no cover\n _NUMEXPR_INSTALLED = False\n\n_TEST_MODE = None\n_TEST_RESULT = None\n_USE_NUMEXPR = _NUMEXPR_INSTALLED\n_evaluate = None\n_where = None\n\n# the set of dtypes that we will allow pass to numexpr\n_ALLOWED_DTYPES = {\n 'evaluate': set(['int64', 'int32', 'float64', 'float32', 'bool']),\n 'where': set(['int64', 'float64', 'bool'])\n}\n\n# the minimum prod shape that we will use numexpr\n_MIN_ELEMENTS = 10000\n\n\ndef set_use_numexpr(v=True):\n # set/unset to use numexpr\n global _USE_NUMEXPR\n if _NUMEXPR_INSTALLED:\n _USE_NUMEXPR = v\n\n # choose what we are going to do\n global _evaluate, _where\n if not _USE_NUMEXPR:\n _evaluate = _evaluate_standard\n _where = _where_standard\n else:\n _evaluate = _evaluate_numexpr\n _where = _where_numexpr\n\n\ndef set_numexpr_threads(n=None):\n # if we are using numexpr, set the threads to n\n # otherwise reset\n if _NUMEXPR_INSTALLED and _USE_NUMEXPR:\n if n is None:\n n = 
ne.detect_number_of_cores()\n ne.set_num_threads(n)\n\n\ndef _evaluate_standard(op, op_str, a, b, raise_on_error=True, **eval_kwargs):\n \"\"\" standard evaluation \"\"\"\n if _TEST_MODE:\n _store_test_result(False)\n return op(a, b)\n\n\ndef _can_use_numexpr(op, op_str, a, b, dtype_check):\n \"\"\" return a boolean if we WILL be using numexpr \"\"\"\n if op_str is not None:\n\n # required min elements (otherwise we are adding overhead)\n if np.prod(a.shape) > _MIN_ELEMENTS:\n\n # check for dtype compatiblity\n dtypes = set()\n for o in [a, b]:\n if hasattr(o, 'get_dtype_counts'):\n s = o.get_dtype_counts()\n if len(s) > 1:\n return False\n dtypes |= set(s.index)\n elif isinstance(o, np.ndarray):\n dtypes |= set([o.dtype.name])\n\n # allowed are a superset\n if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:\n return True\n\n return False\n\n\ndef _evaluate_numexpr(op, op_str, a, b, raise_on_error=False, truediv=True,\n **eval_kwargs):\n result = None\n\n if _can_use_numexpr(op, op_str, a, b, 'evaluate'):\n try:\n a_value = getattr(a, \"values\", a)\n b_value = getattr(b, \"values\", b)\n result = ne.evaluate('a_value %s b_value' % op_str,\n local_dict={'a_value': a_value,\n 'b_value': b_value},\n casting='safe', truediv=truediv,\n **eval_kwargs)\n except ValueError as detail:\n if 'unknown type object' in str(detail):\n pass\n except Exception as detail:\n if raise_on_error:\n raise\n\n if _TEST_MODE:\n _store_test_result(result is not None)\n\n if result is None:\n result = _evaluate_standard(op, op_str, a, b, raise_on_error)\n\n return result\n\n\ndef _where_standard(cond, a, b, raise_on_error=True):\n return np.where(_values_from_object(cond), _values_from_object(a),\n _values_from_object(b))\n\n\ndef _where_numexpr(cond, a, b, raise_on_error=False):\n result = None\n\n if _can_use_numexpr(None, 'where', a, b, 'where'):\n\n try:\n cond_value = getattr(cond, 'values', cond)\n a_value = getattr(a, 'values', a)\n b_value = getattr(b, 'values', b)\n result = ne.evaluate('where(cond_value, a_value, b_value)',\n local_dict={'cond_value': cond_value,\n 'a_value': a_value,\n 'b_value': b_value},\n casting='safe')\n except ValueError as detail:\n if 'unknown type object' in str(detail):\n pass\n except Exception as detail:\n if raise_on_error:\n raise TypeError(str(detail))\n\n if result is None:\n result = _where_standard(cond, a, b, raise_on_error)\n\n return result\n\n\n# turn myself on\nset_use_numexpr(True)\n\n\ndef evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True,\n **eval_kwargs):\n \"\"\" evaluate and return the expression of the op on a and b\n\n Parameters\n ----------\n\n op : the actual operand\n op_str: the string version of the op\n a : left operand\n b : right operand\n raise_on_error : pass the error to the higher level if indicated\n (default is False), otherwise evaluate the op with and\n return the results\n use_numexpr : whether to try to use numexpr (default True)\n \"\"\"\n\n if use_numexpr:\n return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error,\n **eval_kwargs)\n return _evaluate_standard(op, op_str, a, b, raise_on_error=raise_on_error)\n\n\ndef where(cond, a, b, raise_on_error=False, use_numexpr=True):\n \"\"\" evaluate the where condition cond on a and b\n\n Parameters\n ----------\n\n cond : a boolean array\n a : return if cond is True\n b : return if cond is False\n raise_on_error : pass the error to the higher level if indicated\n (default is False), otherwise evaluate the op with and\n return the results\n use_numexpr : 
whether to try to use numexpr (default True)\n \"\"\"\n\n if use_numexpr:\n return _where(cond, a, b, raise_on_error=raise_on_error)\n return _where_standard(cond, a, b, raise_on_error=raise_on_error)\n\n\ndef set_test_mode(v=True):\n \"\"\"\n Keeps track of whether numexpr was used. Stores an additional ``True``\n for every successful use of evaluate with numexpr since the last\n ``get_test_result``\n \"\"\"\n global _TEST_MODE, _TEST_RESULT\n _TEST_MODE = v\n _TEST_RESULT = []\n\n\ndef _store_test_result(used_numexpr):\n global _TEST_RESULT\n if used_numexpr:\n _TEST_RESULT.append(used_numexpr)\n\n\ndef get_test_result():\n \"\"\"get test result and reset test_results\"\"\"\n global _TEST_RESULT\n res = _TEST_RESULT\n _TEST_RESULT = []\n return res\n", "path": "pandas/computation/expressions.py"}], "after_files": [{"content": "\"\"\"\nExpressions\n-----------\n\nOffer fast expression evaluation through numexpr\n\n\"\"\"\n\nimport numpy as np\nfrom pandas.core.common import _values_from_object\nfrom distutils.version import LooseVersion\n\ntry:\n import numexpr as ne\n _NUMEXPR_INSTALLED = ne.__version__ >= LooseVersion('2.0')\nexcept ImportError: # pragma: no cover\n _NUMEXPR_INSTALLED = False\n\n_TEST_MODE = None\n_TEST_RESULT = None\n_USE_NUMEXPR = _NUMEXPR_INSTALLED\n_evaluate = None\n_where = None\n\n# the set of dtypes that we will allow pass to numexpr\n_ALLOWED_DTYPES = {\n 'evaluate': set(['int64', 'int32', 'float64', 'float32', 'bool']),\n 'where': set(['int64', 'float64', 'bool'])\n}\n\n# the minimum prod shape that we will use numexpr\n_MIN_ELEMENTS = 10000\n\n\ndef set_use_numexpr(v=True):\n # set/unset to use numexpr\n global _USE_NUMEXPR\n if _NUMEXPR_INSTALLED:\n _USE_NUMEXPR = v\n\n # choose what we are going to do\n global _evaluate, _where\n if not _USE_NUMEXPR:\n _evaluate = _evaluate_standard\n _where = _where_standard\n else:\n _evaluate = _evaluate_numexpr\n _where = _where_numexpr\n\n\ndef set_numexpr_threads(n=None):\n # if we are using numexpr, set the threads to n\n # otherwise reset\n if _NUMEXPR_INSTALLED and _USE_NUMEXPR:\n if n is None:\n n = ne.detect_number_of_cores()\n ne.set_num_threads(n)\n\n\ndef _evaluate_standard(op, op_str, a, b, raise_on_error=True, **eval_kwargs):\n \"\"\" standard evaluation \"\"\"\n if _TEST_MODE:\n _store_test_result(False)\n return op(a, b)\n\n\ndef _can_use_numexpr(op, op_str, a, b, dtype_check):\n \"\"\" return a boolean if we WILL be using numexpr \"\"\"\n if op_str is not None:\n\n # required min elements (otherwise we are adding overhead)\n if np.prod(a.shape) > _MIN_ELEMENTS:\n\n # check for dtype compatiblity\n dtypes = set()\n for o in [a, b]:\n if hasattr(o, 'get_dtype_counts'):\n s = o.get_dtype_counts()\n if len(s) > 1:\n return False\n dtypes |= set(s.index)\n elif isinstance(o, np.ndarray):\n dtypes |= set([o.dtype.name])\n\n # allowed are a superset\n if not len(dtypes) or _ALLOWED_DTYPES[dtype_check] >= dtypes:\n return True\n\n return False\n\n\ndef _evaluate_numexpr(op, op_str, a, b, raise_on_error=False, truediv=True,\n **eval_kwargs):\n result = None\n\n if _can_use_numexpr(op, op_str, a, b, 'evaluate'):\n try:\n a_value = getattr(a, \"values\", a)\n b_value = getattr(b, \"values\", b)\n result = ne.evaluate('a_value %s b_value' % op_str,\n local_dict={'a_value': a_value,\n 'b_value': b_value},\n casting='safe', truediv=truediv,\n **eval_kwargs)\n except ValueError as detail:\n if 'unknown type object' in str(detail):\n pass\n except Exception as detail:\n if raise_on_error:\n raise\n\n if _TEST_MODE:\n 
_store_test_result(result is not None)\n\n if result is None:\n result = _evaluate_standard(op, op_str, a, b, raise_on_error)\n\n return result\n\n\ndef _where_standard(cond, a, b, raise_on_error=True):\n return np.where(_values_from_object(cond), _values_from_object(a),\n _values_from_object(b))\n\n\ndef _where_numexpr(cond, a, b, raise_on_error=False):\n result = None\n\n if _can_use_numexpr(None, 'where', a, b, 'where'):\n\n try:\n cond_value = getattr(cond, 'values', cond)\n a_value = getattr(a, 'values', a)\n b_value = getattr(b, 'values', b)\n result = ne.evaluate('where(cond_value, a_value, b_value)',\n local_dict={'cond_value': cond_value,\n 'a_value': a_value,\n 'b_value': b_value},\n casting='safe')\n except ValueError as detail:\n if 'unknown type object' in str(detail):\n pass\n except Exception as detail:\n if raise_on_error:\n raise TypeError(str(detail))\n\n if result is None:\n result = _where_standard(cond, a, b, raise_on_error)\n\n return result\n\n\n# turn myself on\nset_use_numexpr(True)\n\n\ndef _has_bool_dtype(x):\n try:\n return x.dtype == bool\n except AttributeError:\n return 'bool' in x.blocks\n\n\ndef _bool_arith_check(op_str, a, b, not_allowed=frozenset(('+', '*', '-', '/',\n '//', '**'))):\n if op_str in not_allowed and _has_bool_dtype(a) and _has_bool_dtype(b):\n raise NotImplementedError(\"operator %r not implemented for bool \"\n \"dtypes\" % op_str)\n\n\ndef evaluate(op, op_str, a, b, raise_on_error=False, use_numexpr=True,\n **eval_kwargs):\n \"\"\" evaluate and return the expression of the op on a and b\n\n Parameters\n ----------\n\n op : the actual operand\n op_str: the string version of the op\n a : left operand\n b : right operand\n raise_on_error : pass the error to the higher level if indicated\n (default is False), otherwise evaluate the op with and\n return the results\n use_numexpr : whether to try to use numexpr (default True)\n \"\"\"\n _bool_arith_check(op_str, a, b)\n if use_numexpr:\n return _evaluate(op, op_str, a, b, raise_on_error=raise_on_error,\n **eval_kwargs)\n return _evaluate_standard(op, op_str, a, b, raise_on_error=raise_on_error)\n\n\ndef where(cond, a, b, raise_on_error=False, use_numexpr=True):\n \"\"\" evaluate the where condition cond on a and b\n\n Parameters\n ----------\n\n cond : a boolean array\n a : return if cond is True\n b : return if cond is False\n raise_on_error : pass the error to the higher level if indicated\n (default is False), otherwise evaluate the op with and\n return the results\n use_numexpr : whether to try to use numexpr (default True)\n \"\"\"\n\n if use_numexpr:\n return _where(cond, a, b, raise_on_error=raise_on_error)\n return _where_standard(cond, a, b, raise_on_error=raise_on_error)\n\n\ndef set_test_mode(v=True):\n \"\"\"\n Keeps track of whether numexpr was used. Stores an additional ``True``\n for every successful use of evaluate with numexpr since the last\n ``get_test_result``\n \"\"\"\n global _TEST_MODE, _TEST_RESULT\n _TEST_MODE = v\n _TEST_RESULT = []\n\n\ndef _store_test_result(used_numexpr):\n global _TEST_RESULT\n if used_numexpr:\n _TEST_RESULT.append(used_numexpr)\n\n\ndef get_test_result():\n \"\"\"get test result and reset test_results\"\"\"\n global _TEST_RESULT\n res = _TEST_RESULT\n _TEST_RESULT = []\n return res\n", "path": "pandas/computation/expressions.py"}]} | 2,801 | 307 |
gh_patches_debug_770 | rasdani/github-patches | git_diff | open-telemetry__opentelemetry-python-1653 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Consider renaming Resource.create_empty() to Resource.get_empty()
Especially given that a cached instance is returned, i.e. no actual creation happens.
--- END ISSUE ---
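To illustrate the point (a small sketch based on the SDK source below): both calls hand back the same module-level `_EMPTY_RESOURCE` object, so nothing is actually created.

```python
from opentelemetry.sdk.resources import Resource

r1 = Resource.create_empty()
r2 = Resource.create_empty()
assert r1 is r2   # same cached instance every time; "create" is misleading
```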
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py`
Content:
```
1 # Copyright The OpenTelemetry Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """
16 This package implements `OpenTelemetry Resources
17 <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#resource-sdk>`_:
18
19 *A Resource is an immutable representation of the entity producing
20 telemetry. For example, a process producing telemetry that is running in
21 a container on Kubernetes has a Pod name, it is in a namespace and
22 possibly is part of a Deployment which also has a name. All three of
23 these attributes can be included in the Resource.*
24
25 Resource objects are created with `Resource.create`, which accepts attributes
26 (key-values). Resource attributes can also be passed at process invocation in
27 the :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable. You should
28 register your resource with the `opentelemetry.sdk.trace.TracerProvider` by
29 passing them into their constructors. The `Resource` passed to a provider is
30 available to the exporter, which can send on this information as it sees fit.
31
32 .. code-block:: python
33
34 trace.set_tracer_provider(
35 TracerProvider(
36 resource=Resource.create({
37 "service.name": "shoppingcart",
38 "service.instance.id": "instance-12",
39 }),
40 ),
41 )
42 print(trace.get_tracer_provider().resource.attributes)
43
44 {'telemetry.sdk.language': 'python',
45 'telemetry.sdk.name': 'opentelemetry',
46 'telemetry.sdk.version': '0.13.dev0',
47 'service.name': 'shoppingcart',
48 'service.instance.id': 'instance-12'}
49
50 Note that the OpenTelemetry project documents certain `"standard attributes"
51 <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/README.md>`_
52 that have prescribed semantic meanings, for example ``service.name`` in the
53 above example.
54
55 .. envvar:: OTEL_RESOURCE_ATTRIBUTES
56
57 The :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable allows resource
58 attributes to be passed to the SDK at process invocation. The attributes from
59 :envvar:`OTEL_RESOURCE_ATTRIBUTES` are merged with those passed to
60 `Resource.create`, meaning :envvar:`OTEL_RESOURCE_ATTRIBUTES` takes *lower*
61 priority. Attributes should be in the format ``key1=value1,key2=value2``.
62 Additional details are available `in the specification
63 <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable>`_.
64
65 .. code-block:: console
66
67 $ OTEL_RESOURCE_ATTRIBUTES="service.name=shoppingcard,will_be_overridden=foo" python - <<EOF
68 import pprint
69 from opentelemetry.sdk.resources import Resource
70 pprint.pprint(Resource.create({"will_be_overridden": "bar"}).attributes)
71 EOF
72 {'service.name': 'shoppingcard',
73 'telemetry.sdk.language': 'python',
74 'telemetry.sdk.name': 'opentelemetry',
75 'telemetry.sdk.version': '0.13.dev0',
76 'will_be_overridden': 'bar'}
77 """
78
79 import abc
80 import concurrent.futures
81 import logging
82 import os
83 import typing
84 from json import dumps
85
86 import pkg_resources
87
88 from opentelemetry.sdk.environment_variables import OTEL_RESOURCE_ATTRIBUTES
89
90 LabelValue = typing.Union[str, bool, int, float]
91 Attributes = typing.Dict[str, LabelValue]
92 logger = logging.getLogger(__name__)
93
94
95 CLOUD_PROVIDER = "cloud.provider"
96 CLOUD_ACCOUNT_ID = "cloud.account.id"
97 CLOUD_REGION = "cloud.region"
98 CLOUD_ZONE = "cloud.zone"
99 CONTAINER_NAME = "container.name"
100 CONTAINER_ID = "container.id"
101 CONTAINER_IMAGE_NAME = "container.image.name"
102 CONTAINER_IMAGE_TAG = "container.image.tag"
103 DEPLOYMENT_ENVIRONMENT = "deployment.environment"
104 FAAS_NAME = "faas.name"
105 FAAS_ID = "faas.id"
106 FAAS_VERSION = "faas.version"
107 FAAS_INSTANCE = "faas.instance"
108 HOST_NAME = "host.name"
109 HOST_TYPE = "host.type"
110 HOST_IMAGE_NAME = "host.image.name"
111 HOST_IMAGE_ID = "host.image.id"
112 HOST_IMAGE_VERSION = "host.image.version"
113 KUBERNETES_CLUSTER_NAME = "k8s.cluster.name"
114 KUBERNETES_NAMESPACE_NAME = "k8s.namespace.name"
115 KUBERNETES_POD_UID = "k8s.pod.uid"
116 KUBERNETES_POD_NAME = "k8s.pod.name"
117 KUBERNETES_CONTAINER_NAME = "k8s.container.name"
118 KUBERNETES_REPLICA_SET_UID = "k8s.replicaset.uid"
119 KUBERNETES_REPLICA_SET_NAME = "k8s.replicaset.name"
120 KUBERNETES_DEPLOYMENT_UID = "k8s.deployment.uid"
121 KUBERNETES_DEPLOYMENT_NAME = "k8s.deployment.name"
122 KUBERNETES_STATEFUL_SET_UID = "k8s.statefulset.uid"
123 KUBERNETES_STATEFUL_SET_NAME = "k8s.statefulset.name"
124 KUBERNETES_DAEMON_SET_UID = "k8s.daemonset.uid"
125 KUBERNETES_DAEMON_SET_NAME = "k8s.daemonset.name"
126 KUBERNETES_JOB_UID = "k8s.job.uid"
127 KUBERNETES_JOB_NAME = "k8s.job.name"
128 KUBERNETES_CRON_JOB_UID = "k8s.cronjob.uid"
129 KUBERNETES_CRON_JOB_NAME = "k8s.cronjob.name"
130 OS_TYPE = "os.type"
131 OS_DESCRIPTION = "os.description"
132 PROCESS_PID = "process.pid"
133 PROCESS_EXECUTABLE_NAME = "process.executable.name"
134 PROCESS_EXECUTABLE_PATH = "process.executable.path"
135 PROCESS_COMMAND = "process.command"
136 PROCESS_COMMAND_LINE = "process.command_line"
137 PROCESS_COMMAND_ARGS = "process.command_args"
138 PROCESS_OWNER = "process.owner"
139 PROCESS_RUNTIME_NAME = "process.runtime.name"
140 PROCESS_RUNTIME_VERSION = "process.runtime.version"
141 PROCESS_RUNTIME_DESCRIPTION = "process.runtime.description"
142 SERVICE_NAME = "service.name"
143 SERVICE_NAMESPACE = "service.namespace"
144 SERVICE_INSTANCE_ID = "service.instance.id"
145 SERVICE_VERSION = "service.version"
146 TELEMETRY_SDK_NAME = "telemetry.sdk.name"
147 TELEMETRY_SDK_VERSION = "telemetry.sdk.version"
148 TELEMETRY_AUTO_VERSION = "telemetry.auto.version"
149 TELEMETRY_SDK_LANGUAGE = "telemetry.sdk.language"
150
151
152 OPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution(
153 "opentelemetry-sdk"
154 ).version
155
156
157 class Resource:
158 """A Resource is an immutable representation of the entity producing telemetry as Attributes."""
159
160 def __init__(self, attributes: Attributes):
161 self._attributes = attributes.copy()
162
163 @staticmethod
164 def create(attributes: typing.Optional[Attributes] = None) -> "Resource":
165 """Creates a new `Resource` from attributes.
166
167 Args:
168 attributes: Optional zero or more key-value pairs.
169
170 Returns:
171 The newly-created Resource.
172 """
173 if not attributes:
174 attributes = {}
175 resource = _DEFAULT_RESOURCE.merge(
176 OTELResourceDetector().detect()
177 ).merge(Resource(attributes))
178 if not resource.attributes.get(SERVICE_NAME, None):
179 default_service_name = "unknown_service"
180 process_executable_name = resource.attributes.get(
181 PROCESS_EXECUTABLE_NAME, None
182 )
183 if process_executable_name:
184 default_service_name += ":" + process_executable_name
185 resource = resource.merge(
186 Resource({SERVICE_NAME: default_service_name})
187 )
188 return resource
189
190 @staticmethod
191 def create_empty() -> "Resource":
192 return _EMPTY_RESOURCE
193
194 @property
195 def attributes(self) -> Attributes:
196 return self._attributes.copy()
197
198 def merge(self, other: "Resource") -> "Resource":
199 """Merges this resource and an updating resource into a new `Resource`.
200
201 If a key exists on both the old and updating resource, the value of the
202 updating resource will override the old resource value.
203
204 Args:
205 other: The other resource to be merged.
206
207 Returns:
208 The newly-created Resource.
209 """
210 merged_attributes = self.attributes
211 merged_attributes.update(other.attributes)
212 return Resource(merged_attributes)
213
214 def __eq__(self, other: object) -> bool:
215 if not isinstance(other, Resource):
216 return False
217 return self._attributes == other._attributes
218
219 def __hash__(self):
220 return hash(dumps(self._attributes, sort_keys=True))
221
222
223 _EMPTY_RESOURCE = Resource({})
224 _DEFAULT_RESOURCE = Resource(
225 {
226 TELEMETRY_SDK_LANGUAGE: "python",
227 TELEMETRY_SDK_NAME: "opentelemetry",
228 TELEMETRY_SDK_VERSION: OPENTELEMETRY_SDK_VERSION,
229 }
230 )
231
232
233 class ResourceDetector(abc.ABC):
234 def __init__(self, raise_on_error=False):
235 self.raise_on_error = raise_on_error
236
237 @abc.abstractmethod
238 def detect(self) -> "Resource":
239 raise NotImplementedError()
240
241
242 class OTELResourceDetector(ResourceDetector):
243 # pylint: disable=no-self-use
244 def detect(self) -> "Resource":
245 env_resources_items = os.environ.get(OTEL_RESOURCE_ATTRIBUTES)
246 env_resource_map = {}
247 if env_resources_items:
248 env_resource_map = {
249 key.strip(): value.strip()
250 for key, value in (
251 item.split("=") for item in env_resources_items.split(",")
252 )
253 }
254 return Resource(env_resource_map)
255
256
257 def get_aggregated_resources(
258 detectors: typing.List["ResourceDetector"],
259 initial_resource: typing.Optional[Resource] = None,
260 timeout=5,
261 ) -> "Resource":
262 """Retrieves resources from detectors in the order that they were passed
263
264 :param detectors: List of resources in order of priority
265 :param initial_resource: Static resource. This has highest priority
266 :param timeout: Number of seconds to wait for each detector to return
267 :return:
268 """
269 final_resource = initial_resource or _EMPTY_RESOURCE
270 detectors = [OTELResourceDetector()] + detectors
271
272 with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
273 futures = [executor.submit(detector.detect) for detector in detectors]
274 for detector_ind, future in enumerate(futures):
275 detector = detectors[detector_ind]
276 try:
277 detected_resources = future.result(timeout=timeout)
278 # pylint: disable=broad-except
279 except Exception as ex:
280 if detector.raise_on_error:
281 raise ex
282 logger.warning(
283 "Exception %s in detector %s, ignoring", ex, detector
284 )
285 detected_resources = _EMPTY_RESOURCE
286 finally:
287 final_resource = final_resource.merge(detected_resources)
288 return final_resource
289
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py
--- a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py
+++ b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py
@@ -188,7 +188,7 @@
return resource
@staticmethod
- def create_empty() -> "Resource":
+ def get_empty() -> "Resource":
return _EMPTY_RESOURCE
@property
| {"golden_diff": "diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py\n--- a/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py\n+++ b/opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py\n@@ -188,7 +188,7 @@\n return resource\n \n @staticmethod\n- def create_empty() -> \"Resource\":\n+ def get_empty() -> \"Resource\":\n return _EMPTY_RESOURCE\n \n @property\n", "issue": "Consider renaming Resource.create_empty() to Resource.get_empty()\nSpecially given the fact a cached instance is returned, i.e. no actual creation happens.\n", "before_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis package implements `OpenTelemetry Resources\n<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#resource-sdk>`_:\n\n *A Resource is an immutable representation of the entity producing\n telemetry. For example, a process producing telemetry that is running in\n a container on Kubernetes has a Pod name, it is in a namespace and\n possibly is part of a Deployment which also has a name. All three of\n these attributes can be included in the Resource.*\n\nResource objects are created with `Resource.create`, which accepts attributes\n(key-values). Resource attributes can also be passed at process invocation in\nthe :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable. You should\nregister your resource with the `opentelemetry.sdk.trace.TracerProvider` by\npassing them into their constructors. The `Resource` passed to a provider is\navailable to the exporter, which can send on this information as it sees fit.\n\n.. code-block:: python\n\n trace.set_tracer_provider(\n TracerProvider(\n resource=Resource.create({\n \"service.name\": \"shoppingcart\",\n \"service.instance.id\": \"instance-12\",\n }),\n ),\n )\n print(trace.get_tracer_provider().resource.attributes)\n\n {'telemetry.sdk.language': 'python',\n 'telemetry.sdk.name': 'opentelemetry',\n 'telemetry.sdk.version': '0.13.dev0',\n 'service.name': 'shoppingcart',\n 'service.instance.id': 'instance-12'}\n\nNote that the OpenTelemetry project documents certain `\"standard attributes\"\n<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/README.md>`_\nthat have prescribed semantic meanings, for example ``service.name`` in the\nabove example.\n\n.. envvar:: OTEL_RESOURCE_ATTRIBUTES\n\nThe :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable allows resource\nattributes to be passed to the SDK at process invocation. The attributes from\n:envvar:`OTEL_RESOURCE_ATTRIBUTES` are merged with those passed to\n`Resource.create`, meaning :envvar:`OTEL_RESOURCE_ATTRIBUTES` takes *lower*\npriority. 
Attributes should be in the format ``key1=value1,key2=value2``.\nAdditional details are available `in the specification\n<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable>`_.\n\n.. code-block:: console\n\n $ OTEL_RESOURCE_ATTRIBUTES=\"service.name=shoppingcard,will_be_overridden=foo\" python - <<EOF\n import pprint\n from opentelemetry.sdk.resources import Resource\n pprint.pprint(Resource.create({\"will_be_overridden\": \"bar\"}).attributes)\n EOF\n {'service.name': 'shoppingcard',\n 'telemetry.sdk.language': 'python',\n 'telemetry.sdk.name': 'opentelemetry',\n 'telemetry.sdk.version': '0.13.dev0',\n 'will_be_overridden': 'bar'}\n \"\"\"\n\nimport abc\nimport concurrent.futures\nimport logging\nimport os\nimport typing\nfrom json import dumps\n\nimport pkg_resources\n\nfrom opentelemetry.sdk.environment_variables import OTEL_RESOURCE_ATTRIBUTES\n\nLabelValue = typing.Union[str, bool, int, float]\nAttributes = typing.Dict[str, LabelValue]\nlogger = logging.getLogger(__name__)\n\n\nCLOUD_PROVIDER = \"cloud.provider\"\nCLOUD_ACCOUNT_ID = \"cloud.account.id\"\nCLOUD_REGION = \"cloud.region\"\nCLOUD_ZONE = \"cloud.zone\"\nCONTAINER_NAME = \"container.name\"\nCONTAINER_ID = \"container.id\"\nCONTAINER_IMAGE_NAME = \"container.image.name\"\nCONTAINER_IMAGE_TAG = \"container.image.tag\"\nDEPLOYMENT_ENVIRONMENT = \"deployment.environment\"\nFAAS_NAME = \"faas.name\"\nFAAS_ID = \"faas.id\"\nFAAS_VERSION = \"faas.version\"\nFAAS_INSTANCE = \"faas.instance\"\nHOST_NAME = \"host.name\"\nHOST_TYPE = \"host.type\"\nHOST_IMAGE_NAME = \"host.image.name\"\nHOST_IMAGE_ID = \"host.image.id\"\nHOST_IMAGE_VERSION = \"host.image.version\"\nKUBERNETES_CLUSTER_NAME = \"k8s.cluster.name\"\nKUBERNETES_NAMESPACE_NAME = \"k8s.namespace.name\"\nKUBERNETES_POD_UID = \"k8s.pod.uid\"\nKUBERNETES_POD_NAME = \"k8s.pod.name\"\nKUBERNETES_CONTAINER_NAME = \"k8s.container.name\"\nKUBERNETES_REPLICA_SET_UID = \"k8s.replicaset.uid\"\nKUBERNETES_REPLICA_SET_NAME = \"k8s.replicaset.name\"\nKUBERNETES_DEPLOYMENT_UID = \"k8s.deployment.uid\"\nKUBERNETES_DEPLOYMENT_NAME = \"k8s.deployment.name\"\nKUBERNETES_STATEFUL_SET_UID = \"k8s.statefulset.uid\"\nKUBERNETES_STATEFUL_SET_NAME = \"k8s.statefulset.name\"\nKUBERNETES_DAEMON_SET_UID = \"k8s.daemonset.uid\"\nKUBERNETES_DAEMON_SET_NAME = \"k8s.daemonset.name\"\nKUBERNETES_JOB_UID = \"k8s.job.uid\"\nKUBERNETES_JOB_NAME = \"k8s.job.name\"\nKUBERNETES_CRON_JOB_UID = \"k8s.cronjob.uid\"\nKUBERNETES_CRON_JOB_NAME = \"k8s.cronjob.name\"\nOS_TYPE = \"os.type\"\nOS_DESCRIPTION = \"os.description\"\nPROCESS_PID = \"process.pid\"\nPROCESS_EXECUTABLE_NAME = \"process.executable.name\"\nPROCESS_EXECUTABLE_PATH = \"process.executable.path\"\nPROCESS_COMMAND = \"process.command\"\nPROCESS_COMMAND_LINE = \"process.command_line\"\nPROCESS_COMMAND_ARGS = \"process.command_args\"\nPROCESS_OWNER = \"process.owner\"\nPROCESS_RUNTIME_NAME = \"process.runtime.name\"\nPROCESS_RUNTIME_VERSION = \"process.runtime.version\"\nPROCESS_RUNTIME_DESCRIPTION = \"process.runtime.description\"\nSERVICE_NAME = \"service.name\"\nSERVICE_NAMESPACE = \"service.namespace\"\nSERVICE_INSTANCE_ID = \"service.instance.id\"\nSERVICE_VERSION = \"service.version\"\nTELEMETRY_SDK_NAME = \"telemetry.sdk.name\"\nTELEMETRY_SDK_VERSION = \"telemetry.sdk.version\"\nTELEMETRY_AUTO_VERSION = \"telemetry.auto.version\"\nTELEMETRY_SDK_LANGUAGE = \"telemetry.sdk.language\"\n\n\nOPENTELEMETRY_SDK_VERSION = 
pkg_resources.get_distribution(\n \"opentelemetry-sdk\"\n).version\n\n\nclass Resource:\n \"\"\"A Resource is an immutable representation of the entity producing telemetry as Attributes.\"\"\"\n\n def __init__(self, attributes: Attributes):\n self._attributes = attributes.copy()\n\n @staticmethod\n def create(attributes: typing.Optional[Attributes] = None) -> \"Resource\":\n \"\"\"Creates a new `Resource` from attributes.\n\n Args:\n attributes: Optional zero or more key-value pairs.\n\n Returns:\n The newly-created Resource.\n \"\"\"\n if not attributes:\n attributes = {}\n resource = _DEFAULT_RESOURCE.merge(\n OTELResourceDetector().detect()\n ).merge(Resource(attributes))\n if not resource.attributes.get(SERVICE_NAME, None):\n default_service_name = \"unknown_service\"\n process_executable_name = resource.attributes.get(\n PROCESS_EXECUTABLE_NAME, None\n )\n if process_executable_name:\n default_service_name += \":\" + process_executable_name\n resource = resource.merge(\n Resource({SERVICE_NAME: default_service_name})\n )\n return resource\n\n @staticmethod\n def create_empty() -> \"Resource\":\n return _EMPTY_RESOURCE\n\n @property\n def attributes(self) -> Attributes:\n return self._attributes.copy()\n\n def merge(self, other: \"Resource\") -> \"Resource\":\n \"\"\"Merges this resource and an updating resource into a new `Resource`.\n\n If a key exists on both the old and updating resource, the value of the\n updating resource will override the old resource value.\n\n Args:\n other: The other resource to be merged.\n\n Returns:\n The newly-created Resource.\n \"\"\"\n merged_attributes = self.attributes\n merged_attributes.update(other.attributes)\n return Resource(merged_attributes)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Resource):\n return False\n return self._attributes == other._attributes\n\n def __hash__(self):\n return hash(dumps(self._attributes, sort_keys=True))\n\n\n_EMPTY_RESOURCE = Resource({})\n_DEFAULT_RESOURCE = Resource(\n {\n TELEMETRY_SDK_LANGUAGE: \"python\",\n TELEMETRY_SDK_NAME: \"opentelemetry\",\n TELEMETRY_SDK_VERSION: OPENTELEMETRY_SDK_VERSION,\n }\n)\n\n\nclass ResourceDetector(abc.ABC):\n def __init__(self, raise_on_error=False):\n self.raise_on_error = raise_on_error\n\n @abc.abstractmethod\n def detect(self) -> \"Resource\":\n raise NotImplementedError()\n\n\nclass OTELResourceDetector(ResourceDetector):\n # pylint: disable=no-self-use\n def detect(self) -> \"Resource\":\n env_resources_items = os.environ.get(OTEL_RESOURCE_ATTRIBUTES)\n env_resource_map = {}\n if env_resources_items:\n env_resource_map = {\n key.strip(): value.strip()\n for key, value in (\n item.split(\"=\") for item in env_resources_items.split(\",\")\n )\n }\n return Resource(env_resource_map)\n\n\ndef get_aggregated_resources(\n detectors: typing.List[\"ResourceDetector\"],\n initial_resource: typing.Optional[Resource] = None,\n timeout=5,\n) -> \"Resource\":\n \"\"\"Retrieves resources from detectors in the order that they were passed\n\n :param detectors: List of resources in order of priority\n :param initial_resource: Static resource. 
This has highest priority\n :param timeout: Number of seconds to wait for each detector to return\n :return:\n \"\"\"\n final_resource = initial_resource or _EMPTY_RESOURCE\n detectors = [OTELResourceDetector()] + detectors\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:\n futures = [executor.submit(detector.detect) for detector in detectors]\n for detector_ind, future in enumerate(futures):\n detector = detectors[detector_ind]\n try:\n detected_resources = future.result(timeout=timeout)\n # pylint: disable=broad-except\n except Exception as ex:\n if detector.raise_on_error:\n raise ex\n logger.warning(\n \"Exception %s in detector %s, ignoring\", ex, detector\n )\n detected_resources = _EMPTY_RESOURCE\n finally:\n final_resource = final_resource.merge(detected_resources)\n return final_resource\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py"}], "after_files": [{"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis package implements `OpenTelemetry Resources\n<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#resource-sdk>`_:\n\n *A Resource is an immutable representation of the entity producing\n telemetry. For example, a process producing telemetry that is running in\n a container on Kubernetes has a Pod name, it is in a namespace and\n possibly is part of a Deployment which also has a name. All three of\n these attributes can be included in the Resource.*\n\nResource objects are created with `Resource.create`, which accepts attributes\n(key-values). Resource attributes can also be passed at process invocation in\nthe :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable. You should\nregister your resource with the `opentelemetry.sdk.trace.TracerProvider` by\npassing them into their constructors. The `Resource` passed to a provider is\navailable to the exporter, which can send on this information as it sees fit.\n\n.. code-block:: python\n\n trace.set_tracer_provider(\n TracerProvider(\n resource=Resource.create({\n \"service.name\": \"shoppingcart\",\n \"service.instance.id\": \"instance-12\",\n }),\n ),\n )\n print(trace.get_tracer_provider().resource.attributes)\n\n {'telemetry.sdk.language': 'python',\n 'telemetry.sdk.name': 'opentelemetry',\n 'telemetry.sdk.version': '0.13.dev0',\n 'service.name': 'shoppingcart',\n 'service.instance.id': 'instance-12'}\n\nNote that the OpenTelemetry project documents certain `\"standard attributes\"\n<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/README.md>`_\nthat have prescribed semantic meanings, for example ``service.name`` in the\nabove example.\n\n.. envvar:: OTEL_RESOURCE_ATTRIBUTES\n\nThe :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable allows resource\nattributes to be passed to the SDK at process invocation. 
The attributes from\n:envvar:`OTEL_RESOURCE_ATTRIBUTES` are merged with those passed to\n`Resource.create`, meaning :envvar:`OTEL_RESOURCE_ATTRIBUTES` takes *lower*\npriority. Attributes should be in the format ``key1=value1,key2=value2``.\nAdditional details are available `in the specification\n<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable>`_.\n\n.. code-block:: console\n\n $ OTEL_RESOURCE_ATTRIBUTES=\"service.name=shoppingcard,will_be_overridden=foo\" python - <<EOF\n import pprint\n from opentelemetry.sdk.resources import Resource\n pprint.pprint(Resource.create({\"will_be_overridden\": \"bar\"}).attributes)\n EOF\n {'service.name': 'shoppingcard',\n 'telemetry.sdk.language': 'python',\n 'telemetry.sdk.name': 'opentelemetry',\n 'telemetry.sdk.version': '0.13.dev0',\n 'will_be_overridden': 'bar'}\n \"\"\"\n\nimport abc\nimport concurrent.futures\nimport logging\nimport os\nimport typing\nfrom json import dumps\n\nimport pkg_resources\n\nfrom opentelemetry.sdk.environment_variables import OTEL_RESOURCE_ATTRIBUTES\n\nLabelValue = typing.Union[str, bool, int, float]\nAttributes = typing.Dict[str, LabelValue]\nlogger = logging.getLogger(__name__)\n\n\nCLOUD_PROVIDER = \"cloud.provider\"\nCLOUD_ACCOUNT_ID = \"cloud.account.id\"\nCLOUD_REGION = \"cloud.region\"\nCLOUD_ZONE = \"cloud.zone\"\nCONTAINER_NAME = \"container.name\"\nCONTAINER_ID = \"container.id\"\nCONTAINER_IMAGE_NAME = \"container.image.name\"\nCONTAINER_IMAGE_TAG = \"container.image.tag\"\nDEPLOYMENT_ENVIRONMENT = \"deployment.environment\"\nFAAS_NAME = \"faas.name\"\nFAAS_ID = \"faas.id\"\nFAAS_VERSION = \"faas.version\"\nFAAS_INSTANCE = \"faas.instance\"\nHOST_NAME = \"host.name\"\nHOST_TYPE = \"host.type\"\nHOST_IMAGE_NAME = \"host.image.name\"\nHOST_IMAGE_ID = \"host.image.id\"\nHOST_IMAGE_VERSION = \"host.image.version\"\nKUBERNETES_CLUSTER_NAME = \"k8s.cluster.name\"\nKUBERNETES_NAMESPACE_NAME = \"k8s.namespace.name\"\nKUBERNETES_POD_UID = \"k8s.pod.uid\"\nKUBERNETES_POD_NAME = \"k8s.pod.name\"\nKUBERNETES_CONTAINER_NAME = \"k8s.container.name\"\nKUBERNETES_REPLICA_SET_UID = \"k8s.replicaset.uid\"\nKUBERNETES_REPLICA_SET_NAME = \"k8s.replicaset.name\"\nKUBERNETES_DEPLOYMENT_UID = \"k8s.deployment.uid\"\nKUBERNETES_DEPLOYMENT_NAME = \"k8s.deployment.name\"\nKUBERNETES_STATEFUL_SET_UID = \"k8s.statefulset.uid\"\nKUBERNETES_STATEFUL_SET_NAME = \"k8s.statefulset.name\"\nKUBERNETES_DAEMON_SET_UID = \"k8s.daemonset.uid\"\nKUBERNETES_DAEMON_SET_NAME = \"k8s.daemonset.name\"\nKUBERNETES_JOB_UID = \"k8s.job.uid\"\nKUBERNETES_JOB_NAME = \"k8s.job.name\"\nKUBERNETES_CRON_JOB_UID = \"k8s.cronjob.uid\"\nKUBERNETES_CRON_JOB_NAME = \"k8s.cronjob.name\"\nOS_TYPE = \"os.type\"\nOS_DESCRIPTION = \"os.description\"\nPROCESS_PID = \"process.pid\"\nPROCESS_EXECUTABLE_NAME = \"process.executable.name\"\nPROCESS_EXECUTABLE_PATH = \"process.executable.path\"\nPROCESS_COMMAND = \"process.command\"\nPROCESS_COMMAND_LINE = \"process.command_line\"\nPROCESS_COMMAND_ARGS = \"process.command_args\"\nPROCESS_OWNER = \"process.owner\"\nPROCESS_RUNTIME_NAME = \"process.runtime.name\"\nPROCESS_RUNTIME_VERSION = \"process.runtime.version\"\nPROCESS_RUNTIME_DESCRIPTION = \"process.runtime.description\"\nSERVICE_NAME = \"service.name\"\nSERVICE_NAMESPACE = \"service.namespace\"\nSERVICE_INSTANCE_ID = \"service.instance.id\"\nSERVICE_VERSION = \"service.version\"\nTELEMETRY_SDK_NAME = \"telemetry.sdk.name\"\nTELEMETRY_SDK_VERSION = 
\"telemetry.sdk.version\"\nTELEMETRY_AUTO_VERSION = \"telemetry.auto.version\"\nTELEMETRY_SDK_LANGUAGE = \"telemetry.sdk.language\"\n\n\nOPENTELEMETRY_SDK_VERSION = pkg_resources.get_distribution(\n \"opentelemetry-sdk\"\n).version\n\n\nclass Resource:\n \"\"\"A Resource is an immutable representation of the entity producing telemetry as Attributes.\"\"\"\n\n def __init__(self, attributes: Attributes):\n self._attributes = attributes.copy()\n\n @staticmethod\n def create(attributes: typing.Optional[Attributes] = None) -> \"Resource\":\n \"\"\"Creates a new `Resource` from attributes.\n\n Args:\n attributes: Optional zero or more key-value pairs.\n\n Returns:\n The newly-created Resource.\n \"\"\"\n if not attributes:\n attributes = {}\n resource = _DEFAULT_RESOURCE.merge(\n OTELResourceDetector().detect()\n ).merge(Resource(attributes))\n if not resource.attributes.get(SERVICE_NAME, None):\n default_service_name = \"unknown_service\"\n process_executable_name = resource.attributes.get(\n PROCESS_EXECUTABLE_NAME, None\n )\n if process_executable_name:\n default_service_name += \":\" + process_executable_name\n resource = resource.merge(\n Resource({SERVICE_NAME: default_service_name})\n )\n return resource\n\n @staticmethod\n def get_empty() -> \"Resource\":\n return _EMPTY_RESOURCE\n\n @property\n def attributes(self) -> Attributes:\n return self._attributes.copy()\n\n def merge(self, other: \"Resource\") -> \"Resource\":\n \"\"\"Merges this resource and an updating resource into a new `Resource`.\n\n If a key exists on both the old and updating resource, the value of the\n updating resource will override the old resource value.\n\n Args:\n other: The other resource to be merged.\n\n Returns:\n The newly-created Resource.\n \"\"\"\n merged_attributes = self.attributes\n merged_attributes.update(other.attributes)\n return Resource(merged_attributes)\n\n def __eq__(self, other: object) -> bool:\n if not isinstance(other, Resource):\n return False\n return self._attributes == other._attributes\n\n def __hash__(self):\n return hash(dumps(self._attributes, sort_keys=True))\n\n\n_EMPTY_RESOURCE = Resource({})\n_DEFAULT_RESOURCE = Resource(\n {\n TELEMETRY_SDK_LANGUAGE: \"python\",\n TELEMETRY_SDK_NAME: \"opentelemetry\",\n TELEMETRY_SDK_VERSION: OPENTELEMETRY_SDK_VERSION,\n }\n)\n\n\nclass ResourceDetector(abc.ABC):\n def __init__(self, raise_on_error=False):\n self.raise_on_error = raise_on_error\n\n @abc.abstractmethod\n def detect(self) -> \"Resource\":\n raise NotImplementedError()\n\n\nclass OTELResourceDetector(ResourceDetector):\n # pylint: disable=no-self-use\n def detect(self) -> \"Resource\":\n env_resources_items = os.environ.get(OTEL_RESOURCE_ATTRIBUTES)\n env_resource_map = {}\n if env_resources_items:\n env_resource_map = {\n key.strip(): value.strip()\n for key, value in (\n item.split(\"=\") for item in env_resources_items.split(\",\")\n )\n }\n return Resource(env_resource_map)\n\n\ndef get_aggregated_resources(\n detectors: typing.List[\"ResourceDetector\"],\n initial_resource: typing.Optional[Resource] = None,\n timeout=5,\n) -> \"Resource\":\n \"\"\"Retrieves resources from detectors in the order that they were passed\n\n :param detectors: List of resources in order of priority\n :param initial_resource: Static resource. 
This has highest priority\n :param timeout: Number of seconds to wait for each detector to return\n :return:\n \"\"\"\n final_resource = initial_resource or _EMPTY_RESOURCE\n detectors = [OTELResourceDetector()] + detectors\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:\n futures = [executor.submit(detector.detect) for detector in detectors]\n for detector_ind, future in enumerate(futures):\n detector = detectors[detector_ind]\n try:\n detected_resources = future.result(timeout=timeout)\n # pylint: disable=broad-except\n except Exception as ex:\n if detector.raise_on_error:\n raise ex\n logger.warning(\n \"Exception %s in detector %s, ignoring\", ex, detector\n )\n detected_resources = _EMPTY_RESOURCE\n finally:\n final_resource = final_resource.merge(detected_resources)\n return final_resource\n", "path": "opentelemetry-sdk/src/opentelemetry/sdk/resources/__init__.py"}]} | 3,468 | 120 |
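The golden diff in the record above is a pure rename: the static accessor that hands back the cached empty resource becomes `Resource.get_empty()`. As a minimal sketch of the call-site impact (assuming the `opentelemetry-sdk` layout shown in the record's files), the only change a caller sees is the method name:

```python
# Minimal sketch of the rename shown in the golden diff above. get_empty() returns
# the cached module-level _EMPTY_RESOURCE, so no new object is created, which is
# exactly why the issue prefers "get" over "create".
from opentelemetry.sdk.resources import Resource

empty = Resource.get_empty()       # after the patch
# empty = Resource.create_empty()  # before the patch; removed by the diff
assert empty.attributes == {}      # the empty resource carries no attributes
```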
gh_patches_debug_11990 | rasdani/github-patches | git_diff | kivy__python-for-android-1513 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Didn't find any valid dependency graphs. - Flask and websocket-client
In my app I use both flask and websocket-client. However, when I try to add both of these dependencies to my app, p4a fails; when I build my app with only `flask` or only `websocket-client`, p4a works correctly.
```
p4a apk --private /home/user/sample/ --package=samplepackage --name="Sample app" --version 0.1 --bootstrap=sdl2 --requirements=python2,websocket-client,flask
[ERROR]: Didn't find any valid dependency graphs.
[ERROR]: This means that some of your requirements pull in conflicting dependencies.
[ERROR]: Exiting.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pythonforandroid/recipes/websocket-client/__init__.py`
Content:
```
1 from pythonforandroid.toolchain import Recipe
2
3 # if android app crashes on start with "ImportError: No module named websocket"
4 #
5 # copy the 'websocket' directory into your app directory to force inclusion.
6 #
7 # see my example at https://github.com/debauchery1st/example_kivy_websocket-recipe
8 #
9 # If you see errors relating to 'SSL not available' ensure you have the package backports.ssl-match-hostname
10 # in the buildozer requirements, since Kivy targets python 2.7.x
11 #
12 # You may also need sslopt={"cert_reqs": ssl.CERT_NONE} as a parameter to ws.run_forever() if you get an error relating to
13 # host verification
14
15
16 class WebSocketClient(Recipe):
17
18 url = 'https://github.com/debauchery1st/websocket-client/raw/master/websocket_client-0.40.0.tar.gz'
19
20 version = '0.40.0'
21 # md5sum = 'f1cf4cc7869ef97a98e5f4be25c30986'
22
23 # patches = ['websocket.patch'] # Paths relative to the recipe dir
24
25 depends = ['kivy', 'python2', 'android', 'pyjnius',
26 'cryptography', 'pyasn1', 'pyopenssl']
27
28
29 recipe = WebSocketClient()
30
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pythonforandroid/recipes/websocket-client/__init__.py b/pythonforandroid/recipes/websocket-client/__init__.py
--- a/pythonforandroid/recipes/websocket-client/__init__.py
+++ b/pythonforandroid/recipes/websocket-client/__init__.py
@@ -15,15 +15,13 @@
class WebSocketClient(Recipe):
- url = 'https://github.com/debauchery1st/websocket-client/raw/master/websocket_client-0.40.0.tar.gz'
+ url = 'https://github.com/websocket-client/websocket-client/archive/v{version}.tar.gz'
version = '0.40.0'
- # md5sum = 'f1cf4cc7869ef97a98e5f4be25c30986'
# patches = ['websocket.patch'] # Paths relative to the recipe dir
- depends = ['kivy', 'python2', 'android', 'pyjnius',
- 'cryptography', 'pyasn1', 'pyopenssl']
+ depends = ['python2', 'android', 'pyjnius', 'cryptography', 'pyasn1', 'pyopenssl']
recipe = WebSocketClient()
| {"golden_diff": "diff --git a/pythonforandroid/recipes/websocket-client/__init__.py b/pythonforandroid/recipes/websocket-client/__init__.py\n--- a/pythonforandroid/recipes/websocket-client/__init__.py\n+++ b/pythonforandroid/recipes/websocket-client/__init__.py\n@@ -15,15 +15,13 @@\n \n class WebSocketClient(Recipe):\n \n- url = 'https://github.com/debauchery1st/websocket-client/raw/master/websocket_client-0.40.0.tar.gz'\n+ url = 'https://github.com/websocket-client/websocket-client/archive/v{version}.tar.gz'\n \n version = '0.40.0'\n- # md5sum = 'f1cf4cc7869ef97a98e5f4be25c30986'\n \n # patches = ['websocket.patch'] # Paths relative to the recipe dir\n \n- depends = ['kivy', 'python2', 'android', 'pyjnius',\n- 'cryptography', 'pyasn1', 'pyopenssl']\n+ depends = ['python2', 'android', 'pyjnius', 'cryptography', 'pyasn1', 'pyopenssl']\n \n \n recipe = WebSocketClient()\n", "issue": "Didn't find any valid dependency graphs. - Flask and websocket-client\nIn my app I use both flask and websocket-client. However, when i try to add both of these dependencies to my app, p4a fails. However, when I build my app only with `flask`, or only with `websocket-client` p4a works correctly.\r\n```\r\np4a apk --private /home/user/sample/ --package=samplepackage --name=\"Sample app\" --version 0.1 --bootstrap=sdl2 --requirements=python2,websocket-client,flask\r\n[ERROR]: Didn't find any valid dependency graphs.\r\n[ERROR]: This means that some of your requirements pull in conflicting dependencies.\r\n[ERROR]: Exiting.```\r\n\r\n\n", "before_files": [{"content": "from pythonforandroid.toolchain import Recipe\n\n# if android app crashes on start with \"ImportError: No module named websocket\"\n#\n# copy the 'websocket' directory into your app directory to force inclusion.\n#\n# see my example at https://github.com/debauchery1st/example_kivy_websocket-recipe\n#\n# If you see errors relating to 'SSL not available' ensure you have the package backports.ssl-match-hostname\n# in the buildozer requirements, since Kivy targets python 2.7.x\n#\n# You may also need sslopt={\"cert_reqs\": ssl.CERT_NONE} as a parameter to ws.run_forever() if you get an error relating to\n# host verification\n\n\nclass WebSocketClient(Recipe):\n\n url = 'https://github.com/debauchery1st/websocket-client/raw/master/websocket_client-0.40.0.tar.gz'\n\n version = '0.40.0'\n # md5sum = 'f1cf4cc7869ef97a98e5f4be25c30986'\n\n # patches = ['websocket.patch'] # Paths relative to the recipe dir\n\n depends = ['kivy', 'python2', 'android', 'pyjnius',\n 'cryptography', 'pyasn1', 'pyopenssl']\n\n\nrecipe = WebSocketClient()\n", "path": "pythonforandroid/recipes/websocket-client/__init__.py"}], "after_files": [{"content": "from pythonforandroid.toolchain import Recipe\n\n# if android app crashes on start with \"ImportError: No module named websocket\"\n#\n# copy the 'websocket' directory into your app directory to force inclusion.\n#\n# see my example at https://github.com/debauchery1st/example_kivy_websocket-recipe\n#\n# If you see errors relating to 'SSL not available' ensure you have the package backports.ssl-match-hostname\n# in the buildozer requirements, since Kivy targets python 2.7.x\n#\n# You may also need sslopt={\"cert_reqs\": ssl.CERT_NONE} as a parameter to ws.run_forever() if you get an error relating to\n# host verification\n\n\nclass WebSocketClient(Recipe):\n\n url = 'https://github.com/websocket-client/websocket-client/archive/v{version}.tar.gz'\n\n version = '0.40.0'\n\n # patches = ['websocket.patch'] # Paths relative to the recipe 
dir\n\n depends = ['python2', 'android', 'pyjnius', 'cryptography', 'pyasn1', 'pyopenssl']\n\n\nrecipe = WebSocketClient()\n", "path": "pythonforandroid/recipes/websocket-client/__init__.py"}]} | 773 | 275 |
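For the python-for-android record above, the patch resolves the dependency-graph conflict by removing `kivy` from the websocket-client recipe's `depends` list (a Flask app has no reason to pull in Kivy) and by templating the download URL on the recipe version. A sketch of the patched recipe, mirroring the after_files entry in the record:

```python
# Sketch of the patched recipe from the golden diff above: no hard kivy dependency,
# and the upstream archive URL is parameterised on {version}.
from pythonforandroid.toolchain import Recipe


class WebSocketClient(Recipe):

    url = 'https://github.com/websocket-client/websocket-client/archive/v{version}.tar.gz'
    version = '0.40.0'

    depends = ['python2', 'android', 'pyjnius', 'cryptography', 'pyasn1', 'pyopenssl']


recipe = WebSocketClient()
```

With the kivy edge gone, `--requirements=python2,websocket-client,flask` no longer pulls in conflicting dependency graphs.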
gh_patches_debug_5446 | rasdani/github-patches | git_diff | xonsh__xonsh-3964 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
'Window' object has no attribute 'children'
<!--- Provide a general summary of the issue in the Title above -->
<!--- If you have a question along the lines of "How do I do this Bash command in xonsh"
please first look over the Bash to Xonsh translation guide: https://xon.sh/bash_to_xsh.html
If you don't find an answer there, please do open an issue! -->
## xonfig
<details>
```
+------------------+----------------------+
| xonsh | 0.9.24 |
| Git SHA | 74543ea9 |
| Commit Date | Oct 10 15:12:47 2020 |
| Python | 3.8.6 |
| PLY | 3.11 |
| have readline | True |
| prompt toolkit | 3.0.3 |
| shell type | prompt_toolkit |
| pygments | 2.7.2 |
| on posix | True |
| on linux | True |
| distro | manjaro |
| on darwin | False |
| on windows | False |
| on cygwin | False |
| on msys2 | False |
| is superuser | False |
| default encoding | utf-8 |
| xonsh encoding | utf-8 |
| encoding errors | surrogateescape |
| on jupyter | False |
| jupyter kernel | None |
| xontrib 1 | abbrevs |
| xontrib 2 | argcomplete |
| xontrib 3 | autovox |
| xontrib 4 | back2dir |
| xontrib 5 | cmd_done |
| xontrib 6 | hist_navigator |
| xontrib 7 | jedi |
| xontrib 8 | kitty |
| xontrib 9 | pdb |
| xontrib 10 | prompt_ret_code |
| xontrib 11 | vox |
| xontrib 12 | voxapi |
+------------------+----------------------+
```
</details>
## Expected Behavior
The interactive shell runs without any error.
## Current Behavior
<!--- Tell us what happens instead of the expected behavior -->
<!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error
To enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`.
On Linux and OSX, an easy way to do this is to run `env XONSH_DEBUG=1 xonsh` -->
I get the above error randomly when `$UPDATE_COMPLETIONS_ON_KEYPRESS = True`
### Traceback (if applicable)
<details>
```
2020-11-08 22:06:05.995 | INFO | xonsh.ptk_shell.completer:reserve_space:118 - 8735829483909
2020-11-08 22:06:06.000 | ERROR | xonsh.ptk_shell.completer:reserve_space:126 - 'Window' object has no attribute 'children'
Traceback (most recent call last):
File "/usr/lib/python3.8/threading.py", line 890, in _bootstrap
self._bootstrap_inner()
│ └ <function Thread._bootstrap_inner at 0x7f1f895e01f0>
└ <Thread(ThreadPoolExecutor-1_0, started daemon 139773312693824)>
File "/usr/lib/python3.8/threading.py", line 932, in _bootstrap_inner
self.run()
│ └ <function Thread.run at 0x7f1f895dfee0>
└ <Thread(ThreadPoolExecutor-1_0, started daemon 139773312693824)>
File "/usr/lib/python3.8/threading.py", line 870, in run
self._target(*self._args, **self._kwargs)
│ │ │ │ │ └ {}
│ │ │ │ └ <Thread(ThreadPoolExecutor-1_0, started daemon 139773312693824)>
│ │ │ └ (<weakref at 0x7f1f803ed270; to 'ThreadPoolExecutor' at 0x7f1f81e857c0>, <_queue.SimpleQueue object at 0x7f1f82a8b2c0>, None,...
│ │ └ <Thread(ThreadPoolExecutor-1_0, started daemon 139773312693824)>
│ └ <function _worker at 0x7f1f81edb670>
└ <Thread(ThreadPoolExecutor-1_0, started daemon 139773312693824)>
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 80, in _worker
work_item.run()
│ └ <function _WorkItem.run at 0x7f1f81edb790>
└ <concurrent.futures.thread._WorkItem object at 0x7f1f803eb460>
File "/usr/lib/python3.8/concurrent/futures/thread.py", line 57, in run
result = self.fn(*self.args, **self.kwargs)
│ │ │ │ │ └ {}
│ │ │ │ └ <concurrent.futures.thread._WorkItem object at 0x7f1f803eb460>
│ │ │ └ [<function generator_to_async_generator.<locals>.runner at 0x7f1f81f13e50>]
│ │ └ <concurrent.futures.thread._WorkItem object at 0x7f1f803eb460>
│ └ <built-in method run of Context object at 0x7f1f8039c9c0>
└ <concurrent.futures.thread._WorkItem object at 0x7f1f803eb460>
File "/home/noor/.config/xonsh/.venv/lib/python3.8/site-packages/prompt_toolkit/eventloop/async_generator.py", line 43, in runner
for item in get_iterable():
└ <function ThreadedCompleter.get_completions_async.<locals>.<lambda> at 0x7f1f81f13ee0>
File "/home/noor/.config/xonsh/xsh-src/xonsh/ptk_shell/completer.py", line 73, in get_completions
self.reserve_space()
│ └ <function PromptToolkitCompleter.reserve_space at 0x7f1f82aa4430>
└ <xonsh.ptk_shell.completer.PromptToolkitCompleter object at 0x7f1f82aed8e0>
> File "/home/noor/.config/xonsh/xsh-src/xonsh/ptk_shell/completer.py", line 123, in reserve_space
hash(app.layout.container.children[0].content.children[1].content)
│ │ └ Window(content=FormattedTextControl(HTML('No layout specified. Press <reverse>ENTER</reverse> to quit.')))
│ └ Layout(Window(content=FormattedTextControl(HTML('No layout specified. Press <reverse>ENTER</reverse> to quit.'))), current_wi...
└ <prompt_toolkit.application.dummy.DummyApplication object at 0x7f1f803eb8b0>
AttributeError: 'Window' object has no attribute 'children'
```
</details>
## Steps to Reproduce
<!--- Please try to write out a minimal reproducible snippet to trigger the bug, it will help us fix it! -->
1. It happens randomly. Sometimes doing a simple `ls -a` triggers the error.
## For community
⬇️ **Please click the 👍 reaction instead of leaving a `+1` or 👍 comment**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `xonsh/ptk_shell/completer.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """Completer implementation to use with prompt_toolkit."""
3 import os
4 import builtins
5
6 from prompt_toolkit.completion import Completer, Completion
7 from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
8 from prompt_toolkit.application.current import get_app
9
10 from xonsh.completers.tools import RichCompletion
11
12
13 class PromptToolkitCompleter(Completer):
14 """Simple prompt_toolkit Completer object.
15
16 It just redirects requests to normal Xonsh completer.
17 """
18
19 def __init__(self, completer, ctx, shell):
20 """Takes instance of xonsh.completer.Completer, the xonsh execution
21 context, and the shell instance itself.
22 """
23 self.completer = completer
24 self.ctx = ctx
25 self.shell = shell
26 self.hist_suggester = AutoSuggestFromHistory()
27 self.current_document = None
28
29 def get_completions(self, document, complete_event):
30 """Returns a generator for list of completions."""
31 env = builtins.__xonsh__.env
32 should_complete = complete_event.completion_requested or env.get(
33 "UPDATE_COMPLETIONS_ON_KEYPRESS"
34 )
35 # Only generate completions when the user hits tab.
36 if not should_complete or self.completer is None:
37 return
38 # generate actual completions
39 line = document.current_line.lstrip()
40 line_ex = builtins.aliases.expand_alias(line)
41
42 endidx = document.cursor_position_col
43 begidx = line[:endidx].rfind(" ") + 1 if line[:endidx].rfind(" ") >= 0 else 0
44 prefix = line[begidx:endidx]
45 expand_offset = len(line_ex) - len(line)
46
47 # enable completers to access entire document
48 self.current_document = document
49
50 # get normal completions
51 completions, l = self.completer.complete(
52 prefix, line_ex, begidx + expand_offset, endidx + expand_offset, self.ctx
53 )
54
55 self.current_document = None
56
57 # completions from auto suggest
58 sug_comp = None
59 if env.get("AUTO_SUGGEST") and env.get("AUTO_SUGGEST_IN_COMPLETIONS"):
60 sug_comp = self.suggestion_completion(document, line)
61 if sug_comp is None:
62 pass
63 elif len(completions) == 0:
64 completions = (sug_comp,)
65 else:
66 completions = set(completions)
67 completions.discard(sug_comp)
68 completions = (sug_comp,) + tuple(sorted(completions))
69 # reserve space, if needed.
70 if len(completions) <= 1:
71 pass
72 elif len(os.path.commonprefix(completions)) <= len(prefix):
73 self.reserve_space()
74 # Find common prefix (strip quoting)
75 c_prefix = os.path.commonprefix([a.strip("'\"") for a in completions])
76 # Find last split symbol, do not trim the last part
77 while c_prefix:
78 if c_prefix[-1] in r"/\.:@,":
79 break
80 c_prefix = c_prefix[:-1]
81 # yield completions
82 if sug_comp is None:
83 pre = min(document.cursor_position_col - begidx, len(c_prefix))
84 else:
85 pre = len(c_prefix)
86 for comp in completions:
87 # do not display quote
88 if isinstance(comp, RichCompletion):
89 yield Completion(
90 comp,
91 -comp.prefix_len if comp.prefix_len is not None else -l,
92 display=comp.display,
93 display_meta=comp.description or None,
94 )
95 else:
96 disp = comp[pre:].strip("'\"")
97 yield Completion(comp, -l, display=disp)
98
99 def suggestion_completion(self, document, line):
100 """Provides a completion based on the current auto-suggestion."""
101 app = self.shell.prompter.app
102 sug = self.hist_suggester.get_suggestion(app.current_buffer, document)
103 if sug is None:
104 return None
105 comp, _, _ = sug.text.partition(" ")
106 _, _, prev = line.rpartition(" ")
107 return prev + comp
108
109 def reserve_space(self):
110 """Adjust the height for showing autocompletion menu."""
111 app = get_app()
112 render = app.renderer
113 window = app.layout.container.children[0].content.children[1].content
114
115 if window and window.render_info:
116 h = window.render_info.content_height
117 r = builtins.__xonsh__.env.get("COMPLETIONS_MENU_ROWS")
118 size = h + r
119 last_h = render._last_screen.height if render._last_screen else 0
120 last_h = max(render._min_available_height, last_h)
121 if last_h < size:
122 if render._last_screen:
123 render._last_screen.height = size
124
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/xonsh/ptk_shell/completer.py b/xonsh/ptk_shell/completer.py
--- a/xonsh/ptk_shell/completer.py
+++ b/xonsh/ptk_shell/completer.py
@@ -110,7 +110,7 @@
"""Adjust the height for showing autocompletion menu."""
app = get_app()
render = app.renderer
- window = app.layout.container.children[0].content.children[1].content
+ window = app.layout.current_window
if window and window.render_info:
h = window.render_info.content_height
| {"golden_diff": "diff --git a/xonsh/ptk_shell/completer.py b/xonsh/ptk_shell/completer.py\n--- a/xonsh/ptk_shell/completer.py\n+++ b/xonsh/ptk_shell/completer.py\n@@ -110,7 +110,7 @@\n \"\"\"Adjust the height for showing autocompletion menu.\"\"\"\n app = get_app()\n render = app.renderer\n- window = app.layout.container.children[0].content.children[1].content\n+ window = app.layout.current_window\n \n if window and window.render_info:\n h = window.render_info.content_height\n", "issue": "'Window' object has no attribute 'children'\n<!--- Provide a general summary of the issue in the Title above -->\r\n<!--- If you have a question along the lines of \"How do I do this Bash command in xonsh\"\r\nplease first look over the Bash to Xonsh translation guide: https://xon.sh/bash_to_xsh.html\r\nIf you don't find an answer there, please do open an issue! -->\r\n\r\n## xonfig\r\n\r\n<details>\r\n\r\n```\r\n+------------------+----------------------+\r\n| xonsh | 0.9.24 |\r\n| Git SHA | 74543ea9 |\r\n| Commit Date | Oct 10 15:12:47 2020 |\r\n| Python | 3.8.6 |\r\n| PLY | 3.11 |\r\n| have readline | True |\r\n| prompt toolkit | 3.0.3 |\r\n| shell type | prompt_toolkit |\r\n| pygments | 2.7.2 |\r\n| on posix | True |\r\n| on linux | True |\r\n| distro | manjaro |\r\n| on darwin | False |\r\n| on windows | False |\r\n| on cygwin | False |\r\n| on msys2 | False |\r\n| is superuser | False |\r\n| default encoding | utf-8 |\r\n| xonsh encoding | utf-8 |\r\n| encoding errors | surrogateescape |\r\n| on jupyter | False |\r\n| jupyter kernel | None |\r\n| xontrib 1 | abbrevs |\r\n| xontrib 2 | argcomplete |\r\n| xontrib 3 | autovox |\r\n| xontrib 4 | back2dir |\r\n| xontrib 5 | cmd_done |\r\n| xontrib 6 | hist_navigator |\r\n| xontrib 7 | jedi |\r\n| xontrib 8 | kitty |\r\n| xontrib 9 | pdb |\r\n| xontrib 10 | prompt_ret_code |\r\n| xontrib 11 | vox |\r\n| xontrib 12 | voxapi |\r\n+------------------+----------------------+\r\n```\r\n\r\n</details>\r\n\r\n## Expected Behavior\r\ninteractive shell runs without any error\r\n\r\n## Current Behavior\r\n<!--- Tell us what happens instead of the expected behavior -->\r\n<!--- If part of your bug report is a traceback, please first enter debug mode before triggering the error\r\nTo enter debug mode, set the environment variable `XONSH_DEBUG=1` _before_ starting `xonsh`.\r\nOn Linux and OSX, an easy way to to do this is to run `env XONSH_DEBUG=1 xonsh` -->\r\nI get the above error randomly when `$UPDATE_COMPLETIONS_ON_KEYPRESS = True`\r\n\r\n### Traceback (if applicable)\r\n\r\n<details>\r\n\r\n```\r\n2020-11-08 22:06:05.995 | INFO | xonsh.ptk_shell.completer:reserve_space:118 - 8735829483909\r\n2020-11-08 22:06:06.000 | ERROR | xonsh.ptk_shell.completer:reserve_space:126 - 'Window' object has no attribute 'children'\r\nTraceback (most recent call last):\r\n\r\n File \"/usr/lib/python3.8/threading.py\", line 890, in _bootstrap\r\n self._bootstrap_inner()\r\n \u2502 \u2514 <function Thread._bootstrap_inner at 0x7f1f895e01f0>\r\n \u2514 <Thread(ThreadPoolExecutor-1_0, started daemon 139773312693824)>\r\n File \"/usr/lib/python3.8/threading.py\", line 932, in _bootstrap_inner\r\n self.run()\r\n \u2502 \u2514 <function Thread.run at 0x7f1f895dfee0>\r\n \u2514 <Thread(ThreadPoolExecutor-1_0, started daemon 139773312693824)>\r\n File \"/usr/lib/python3.8/threading.py\", line 870, in run\r\n self._target(*self._args, **self._kwargs)\r\n \u2502 \u2502 \u2502 \u2502 \u2502 \u2514 {}\r\n \u2502 \u2502 \u2502 \u2502 \u2514 <Thread(ThreadPoolExecutor-1_0, started daemon 
139773312693824)>\r\n \u2502 \u2502 \u2502 \u2514 (<weakref at 0x7f1f803ed270; to 'ThreadPoolExecutor' at 0x7f1f81e857c0>, <_queue.SimpleQueue object at 0x7f1f82a8b2c0>, None,...\r\n \u2502 \u2502 \u2514 <Thread(ThreadPoolExecutor-1_0, started daemon 139773312693824)>\r\n \u2502 \u2514 <function _worker at 0x7f1f81edb670>\r\n \u2514 <Thread(ThreadPoolExecutor-1_0, started daemon 139773312693824)>\r\n File \"/usr/lib/python3.8/concurrent/futures/thread.py\", line 80, in _worker\r\n work_item.run()\r\n \u2502 \u2514 <function _WorkItem.run at 0x7f1f81edb790>\r\n \u2514 <concurrent.futures.thread._WorkItem object at 0x7f1f803eb460>\r\n File \"/usr/lib/python3.8/concurrent/futures/thread.py\", line 57, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\n \u2502 \u2502 \u2502 \u2502 \u2502 \u2514 {}\r\n \u2502 \u2502 \u2502 \u2502 \u2514 <concurrent.futures.thread._WorkItem object at 0x7f1f803eb460>\r\n \u2502 \u2502 \u2502 \u2514 [<function generator_to_async_generator.<locals>.runner at 0x7f1f81f13e50>]\r\n \u2502 \u2502 \u2514 <concurrent.futures.thread._WorkItem object at 0x7f1f803eb460>\r\n \u2502 \u2514 <built-in method run of Context object at 0x7f1f8039c9c0>\r\n \u2514 <concurrent.futures.thread._WorkItem object at 0x7f1f803eb460>\r\n File \"/home/noor/.config/xonsh/.venv/lib/python3.8/site-packages/prompt_toolkit/eventloop/async_generator.py\", line 43, in runner\r\n for item in get_iterable():\r\n \u2514 <function ThreadedCompleter.get_completions_async.<locals>.<lambda> at 0x7f1f81f13ee0>\r\n\r\n File \"/home/noor/.config/xonsh/xsh-src/xonsh/ptk_shell/completer.py\", line 73, in get_completions\r\n self.reserve_space()\r\n \u2502 \u2514 <function PromptToolkitCompleter.reserve_space at 0x7f1f82aa4430>\r\n \u2514 <xonsh.ptk_shell.completer.PromptToolkitCompleter object at 0x7f1f82aed8e0>\r\n\r\n> File \"/home/noor/.config/xonsh/xsh-src/xonsh/ptk_shell/completer.py\", line 123, in reserve_space\r\n hash(app.layout.container.children[0].content.children[1].content)\r\n \u2502 \u2502 \u2514 Window(content=FormattedTextControl(HTML('No layout specified. Press <reverse>ENTER</reverse> to quit.')))\r\n \u2502 \u2514 Layout(Window(content=FormattedTextControl(HTML('No layout specified. Press <reverse>ENTER</reverse> to quit.'))), current_wi...\r\n \u2514 <prompt_toolkit.application.dummy.DummyApplication object at 0x7f1f803eb8b0>\r\n\r\nAttributeError: 'Window' object has no attribute 'children'\r\n\r\n```\r\n\r\n</details>\r\n\r\n## Steps to Reproduce\r\n<!--- Please try to write out a minimal reproducible snippet to trigger the bug, it will help us fix it! -->\r\n1. it happens randomly. 
sometime doing simple `ls -a` triggers the error\r\n\r\n## For community\r\n\u2b07\ufe0f **Please click the \ud83d\udc4d reaction instead of leaving a `+1` or \ud83d\udc4d comment**\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Completer implementation to use with prompt_toolkit.\"\"\"\nimport os\nimport builtins\n\nfrom prompt_toolkit.completion import Completer, Completion\nfrom prompt_toolkit.auto_suggest import AutoSuggestFromHistory\nfrom prompt_toolkit.application.current import get_app\n\nfrom xonsh.completers.tools import RichCompletion\n\n\nclass PromptToolkitCompleter(Completer):\n \"\"\"Simple prompt_toolkit Completer object.\n\n It just redirects requests to normal Xonsh completer.\n \"\"\"\n\n def __init__(self, completer, ctx, shell):\n \"\"\"Takes instance of xonsh.completer.Completer, the xonsh execution\n context, and the shell instance itself.\n \"\"\"\n self.completer = completer\n self.ctx = ctx\n self.shell = shell\n self.hist_suggester = AutoSuggestFromHistory()\n self.current_document = None\n\n def get_completions(self, document, complete_event):\n \"\"\"Returns a generator for list of completions.\"\"\"\n env = builtins.__xonsh__.env\n should_complete = complete_event.completion_requested or env.get(\n \"UPDATE_COMPLETIONS_ON_KEYPRESS\"\n )\n # Only generate completions when the user hits tab.\n if not should_complete or self.completer is None:\n return\n # generate actual completions\n line = document.current_line.lstrip()\n line_ex = builtins.aliases.expand_alias(line)\n\n endidx = document.cursor_position_col\n begidx = line[:endidx].rfind(\" \") + 1 if line[:endidx].rfind(\" \") >= 0 else 0\n prefix = line[begidx:endidx]\n expand_offset = len(line_ex) - len(line)\n\n # enable completers to access entire document\n self.current_document = document\n\n # get normal completions\n completions, l = self.completer.complete(\n prefix, line_ex, begidx + expand_offset, endidx + expand_offset, self.ctx\n )\n\n self.current_document = None\n\n # completions from auto suggest\n sug_comp = None\n if env.get(\"AUTO_SUGGEST\") and env.get(\"AUTO_SUGGEST_IN_COMPLETIONS\"):\n sug_comp = self.suggestion_completion(document, line)\n if sug_comp is None:\n pass\n elif len(completions) == 0:\n completions = (sug_comp,)\n else:\n completions = set(completions)\n completions.discard(sug_comp)\n completions = (sug_comp,) + tuple(sorted(completions))\n # reserve space, if needed.\n if len(completions) <= 1:\n pass\n elif len(os.path.commonprefix(completions)) <= len(prefix):\n self.reserve_space()\n # Find common prefix (strip quoting)\n c_prefix = os.path.commonprefix([a.strip(\"'\\\"\") for a in completions])\n # Find last split symbol, do not trim the last part\n while c_prefix:\n if c_prefix[-1] in r\"/\\.:@,\":\n break\n c_prefix = c_prefix[:-1]\n # yield completions\n if sug_comp is None:\n pre = min(document.cursor_position_col - begidx, len(c_prefix))\n else:\n pre = len(c_prefix)\n for comp in completions:\n # do not display quote\n if isinstance(comp, RichCompletion):\n yield Completion(\n comp,\n -comp.prefix_len if comp.prefix_len is not None else -l,\n display=comp.display,\n display_meta=comp.description or None,\n )\n else:\n disp = comp[pre:].strip(\"'\\\"\")\n yield Completion(comp, -l, display=disp)\n\n def suggestion_completion(self, document, line):\n \"\"\"Provides a completion based on the current auto-suggestion.\"\"\"\n app = self.shell.prompter.app\n sug = self.hist_suggester.get_suggestion(app.current_buffer, document)\n if sug is 
None:\n return None\n comp, _, _ = sug.text.partition(\" \")\n _, _, prev = line.rpartition(\" \")\n return prev + comp\n\n def reserve_space(self):\n \"\"\"Adjust the height for showing autocompletion menu.\"\"\"\n app = get_app()\n render = app.renderer\n window = app.layout.container.children[0].content.children[1].content\n\n if window and window.render_info:\n h = window.render_info.content_height\n r = builtins.__xonsh__.env.get(\"COMPLETIONS_MENU_ROWS\")\n size = h + r\n last_h = render._last_screen.height if render._last_screen else 0\n last_h = max(render._min_available_height, last_h)\n if last_h < size:\n if render._last_screen:\n render._last_screen.height = size\n", "path": "xonsh/ptk_shell/completer.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"Completer implementation to use with prompt_toolkit.\"\"\"\nimport os\nimport builtins\n\nfrom prompt_toolkit.completion import Completer, Completion\nfrom prompt_toolkit.auto_suggest import AutoSuggestFromHistory\nfrom prompt_toolkit.application.current import get_app\n\nfrom xonsh.completers.tools import RichCompletion\n\n\nclass PromptToolkitCompleter(Completer):\n \"\"\"Simple prompt_toolkit Completer object.\n\n It just redirects requests to normal Xonsh completer.\n \"\"\"\n\n def __init__(self, completer, ctx, shell):\n \"\"\"Takes instance of xonsh.completer.Completer, the xonsh execution\n context, and the shell instance itself.\n \"\"\"\n self.completer = completer\n self.ctx = ctx\n self.shell = shell\n self.hist_suggester = AutoSuggestFromHistory()\n self.current_document = None\n\n def get_completions(self, document, complete_event):\n \"\"\"Returns a generator for list of completions.\"\"\"\n env = builtins.__xonsh__.env\n should_complete = complete_event.completion_requested or env.get(\n \"UPDATE_COMPLETIONS_ON_KEYPRESS\"\n )\n # Only generate completions when the user hits tab.\n if not should_complete or self.completer is None:\n return\n # generate actual completions\n line = document.current_line.lstrip()\n line_ex = builtins.aliases.expand_alias(line)\n\n endidx = document.cursor_position_col\n begidx = line[:endidx].rfind(\" \") + 1 if line[:endidx].rfind(\" \") >= 0 else 0\n prefix = line[begidx:endidx]\n expand_offset = len(line_ex) - len(line)\n\n # enable completers to access entire document\n self.current_document = document\n\n # get normal completions\n completions, l = self.completer.complete(\n prefix, line_ex, begidx + expand_offset, endidx + expand_offset, self.ctx\n )\n\n self.current_document = None\n\n # completions from auto suggest\n sug_comp = None\n if env.get(\"AUTO_SUGGEST\") and env.get(\"AUTO_SUGGEST_IN_COMPLETIONS\"):\n sug_comp = self.suggestion_completion(document, line)\n if sug_comp is None:\n pass\n elif len(completions) == 0:\n completions = (sug_comp,)\n else:\n completions = set(completions)\n completions.discard(sug_comp)\n completions = (sug_comp,) + tuple(sorted(completions))\n # reserve space, if needed.\n if len(completions) <= 1:\n pass\n elif len(os.path.commonprefix(completions)) <= len(prefix):\n self.reserve_space()\n # Find common prefix (strip quoting)\n c_prefix = os.path.commonprefix([a.strip(\"'\\\"\") for a in completions])\n # Find last split symbol, do not trim the last part\n while c_prefix:\n if c_prefix[-1] in r\"/\\.:@,\":\n break\n c_prefix = c_prefix[:-1]\n # yield completions\n if sug_comp is None:\n pre = min(document.cursor_position_col - begidx, len(c_prefix))\n else:\n pre = len(c_prefix)\n for comp in completions:\n # do not display 
quote\n if isinstance(comp, RichCompletion):\n yield Completion(\n comp,\n -comp.prefix_len if comp.prefix_len is not None else -l,\n display=comp.display,\n display_meta=comp.description or None,\n )\n else:\n disp = comp[pre:].strip(\"'\\\"\")\n yield Completion(comp, -l, display=disp)\n\n def suggestion_completion(self, document, line):\n \"\"\"Provides a completion based on the current auto-suggestion.\"\"\"\n app = self.shell.prompter.app\n sug = self.hist_suggester.get_suggestion(app.current_buffer, document)\n if sug is None:\n return None\n comp, _, _ = sug.text.partition(\" \")\n _, _, prev = line.rpartition(\" \")\n return prev + comp\n\n def reserve_space(self):\n \"\"\"Adjust the height for showing autocompletion menu.\"\"\"\n app = get_app()\n render = app.renderer\n window = app.layout.current_window\n\n if window and window.render_info:\n h = window.render_info.content_height\n r = builtins.__xonsh__.env.get(\"COMPLETIONS_MENU_ROWS\")\n size = h + r\n last_h = render._last_screen.height if render._last_screen else 0\n last_h = max(render._min_available_height, last_h)\n if last_h < size:\n if render._last_screen:\n render._last_screen.height = size\n", "path": "xonsh/ptk_shell/completer.py"}]} | 3,510 | 138 |
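The xonsh fix above replaces the hard-coded walk through `app.layout.container.children[...]` with prompt_toolkit's `app.layout.current_window`, which exists on every layout, including the `DummyApplication` seen in the traceback. A minimal sketch of the patched method, matching the golden diff, with imports added here for context:

```python
# Sketch of PromptToolkitCompleter.reserve_space() after the patch: querying
# layout.current_window avoids the AttributeError raised when the container is a
# bare Window with no .children.
import builtins

from prompt_toolkit.application.current import get_app


def reserve_space(self):
    """Adjust the height for showing the autocompletion menu."""
    app = get_app()
    render = app.renderer
    window = app.layout.current_window  # was: app.layout.container.children[0]...

    if window and window.render_info:
        h = window.render_info.content_height
        r = builtins.__xonsh__.env.get("COMPLETIONS_MENU_ROWS")
        size = h + r
        last_h = render._last_screen.height if render._last_screen else 0
        last_h = max(render._min_available_height, last_h)
        if last_h < size:
            if render._last_screen:
                render._last_screen.height = size
```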
gh_patches_debug_35836 | rasdani/github-patches | git_diff | pyca__cryptography-1532 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for loading DSA OpenSSH public keys
Should be straightforward to add support to the existing code.
--- END ISSUE ---
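For context, an OpenSSH DSA public key line has the form `ssh-dss <base64 blob> [comment]`, and the decoded blob packs the key-type string followed by the DSA values `p`, `q`, `g` and `y` as RFC 4251 mpints. Below is a minimal sketch of a loader for that layout, reusing the `_read_next_string`/`_read_next_mpint` helpers defined in the file below; it is illustrative only and mirrors the patch recorded later in this row rather than prescribing it.
```python
from cryptography.hazmat.primitives.asymmetric.dsa import (
    DSAParameterNumbers, DSAPublicNumbers
)


def _load_ssh_dss_public_key(decoded_data, backend):
    # decoded_data is the base64-decoded key body: the "ssh-dss" string, then p, q, g, y.
    key_type, rest = _read_next_string(decoded_data)
    p, rest = _read_next_mpint(rest)
    q, rest = _read_next_mpint(rest)
    g, rest = _read_next_mpint(rest)
    y, rest = _read_next_mpint(rest)

    if key_type != b'ssh-dss':
        raise ValueError('Key header and key body contain different key type values.')
    if rest:
        raise ValueError('Key body contains extra bytes.')

    parameter_numbers = DSAParameterNumbers(p, q, g)
    return backend.load_dsa_public_numbers(DSAPublicNumbers(y, parameter_numbers))
```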
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cryptography/hazmat/primitives/serialization.py`
Content:
```
1 # This file is dual licensed under the terms of the Apache License, Version
2 # 2.0, and the BSD License. See the LICENSE file in the root of this repository
3 # for complete details.
4
5 from __future__ import absolute_import, division, print_function
6
7 import base64
8 import struct
9 import warnings
10
11 from cryptography import utils
12 from cryptography.exceptions import UnsupportedAlgorithm
13 from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers
14
15
16 def load_pem_traditional_openssl_private_key(data, password, backend):
17 warnings.warn(
18 "load_pem_traditional_openssl_private_key is deprecated and will be "
19 "removed in a future version, use load_pem_private_key instead.",
20 utils.DeprecatedIn06,
21 stacklevel=2
22 )
23
24 return backend.load_traditional_openssl_pem_private_key(
25 data, password
26 )
27
28
29 def load_pem_pkcs8_private_key(data, password, backend):
30 warnings.warn(
31 "load_pem_pkcs8_private_key is deprecated and will be removed in a "
32 "future version, use load_pem_private_key instead.",
33 utils.DeprecatedIn06,
34 stacklevel=2
35 )
36
37 return backend.load_pkcs8_pem_private_key(data, password)
38
39
40 def load_pem_private_key(data, password, backend):
41 return backend.load_pem_private_key(data, password)
42
43
44 def load_pem_public_key(data, backend):
45 return backend.load_pem_public_key(data)
46
47
48 def load_ssh_public_key(data, backend):
49 key_parts = data.split(b' ')
50
51 if len(key_parts) != 2 and len(key_parts) != 3:
52 raise ValueError(
53 'Key is not in the proper format or contains extra data.')
54
55 key_type = key_parts[0]
56 key_body = key_parts[1]
57
58 if not key_type.startswith(b'ssh-'):
59 raise ValueError('SSH-formatted keys must begin with \'ssh-\'.')
60
61 if not key_type.startswith(b'ssh-rsa'):
62 raise UnsupportedAlgorithm('Only RSA keys are currently supported.')
63
64 return _load_ssh_rsa_public_key(key_body, backend)
65
66
67 def _load_ssh_rsa_public_key(key_body, backend):
68 data = base64.b64decode(key_body)
69
70 key_type, rest = _read_next_string(data)
71 e, rest = _read_next_mpint(rest)
72 n, rest = _read_next_mpint(rest)
73
74 if key_type != b'ssh-rsa':
75 raise ValueError(
76 'Key header and key body contain different key type values.')
77
78 if rest:
79 raise ValueError('Key body contains extra bytes.')
80
81 return backend.load_rsa_public_numbers(RSAPublicNumbers(e, n))
82
83
84 def _read_next_string(data):
85 """Retrieves the next RFC 4251 string value from the data."""
86 str_len, = struct.unpack('>I', data[:4])
87 return data[4:4 + str_len], data[4 + str_len:]
88
89
90 def _read_next_mpint(data):
91 """
92 Reads the next mpint from the data.
93
94 Currently, all mpints are interpreted as unsigned.
95 """
96 mpint_data, rest = _read_next_string(data)
97
98 return _int_from_bytes(mpint_data, byteorder='big', signed=False), rest
99
100
101 if hasattr(int, "from_bytes"):
102 _int_from_bytes = int.from_bytes
103 else:
104 def _int_from_bytes(data, byteorder, signed=False):
105 assert byteorder == 'big'
106 assert not signed
107
108 if len(data) % 4 != 0:
109 data = (b'\x00' * (4 - (len(data) % 4))) + data
110
111 result = 0
112
113 while len(data) > 0:
114 digit, = struct.unpack('>I', data[:4])
115 result = (result << 32) + digit
116 data = data[4:]
117
118 return result
119
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/cryptography/hazmat/primitives/serialization.py b/src/cryptography/hazmat/primitives/serialization.py
--- a/src/cryptography/hazmat/primitives/serialization.py
+++ b/src/cryptography/hazmat/primitives/serialization.py
@@ -10,6 +10,9 @@
from cryptography import utils
from cryptography.exceptions import UnsupportedAlgorithm
+from cryptography.hazmat.primitives.asymmetric.dsa import (
+ DSAParameterNumbers, DSAPublicNumbers
+)
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers
@@ -55,19 +58,23 @@
key_type = key_parts[0]
key_body = key_parts[1]
- if not key_type.startswith(b'ssh-'):
- raise ValueError('SSH-formatted keys must begin with \'ssh-\'.')
+ try:
+ decoded_data = base64.b64decode(key_body)
+ except TypeError:
+ raise ValueError('Key is not in the proper format.')
- if not key_type.startswith(b'ssh-rsa'):
- raise UnsupportedAlgorithm('Only RSA keys are currently supported.')
+ if key_type == b'ssh-rsa':
+ return _load_ssh_rsa_public_key(decoded_data, backend)
+ elif key_type == b'ssh-dss':
+ return _load_ssh_dss_public_key(decoded_data, backend)
+ else:
+ raise UnsupportedAlgorithm(
+ 'Only RSA and DSA keys are currently supported.'
+ )
- return _load_ssh_rsa_public_key(key_body, backend)
-
-def _load_ssh_rsa_public_key(key_body, backend):
- data = base64.b64decode(key_body)
-
- key_type, rest = _read_next_string(data)
+def _load_ssh_rsa_public_key(decoded_data, backend):
+ key_type, rest = _read_next_string(decoded_data)
e, rest = _read_next_mpint(rest)
n, rest = _read_next_mpint(rest)
@@ -81,6 +88,26 @@
return backend.load_rsa_public_numbers(RSAPublicNumbers(e, n))
+def _load_ssh_dss_public_key(decoded_data, backend):
+ key_type, rest = _read_next_string(decoded_data)
+ p, rest = _read_next_mpint(rest)
+ q, rest = _read_next_mpint(rest)
+ g, rest = _read_next_mpint(rest)
+ y, rest = _read_next_mpint(rest)
+
+ if key_type != b'ssh-dss':
+ raise ValueError(
+ 'Key header and key body contain different key type values.')
+
+ if rest:
+ raise ValueError('Key body contains extra bytes.')
+
+ parameter_numbers = DSAParameterNumbers(p, q, g)
+ public_numbers = DSAPublicNumbers(y, parameter_numbers)
+
+ return backend.load_dsa_public_numbers(public_numbers)
+
+
def _read_next_string(data):
"""Retrieves the next RFC 4251 string value from the data."""
str_len, = struct.unpack('>I', data[:4])
| {"golden_diff": "diff --git a/src/cryptography/hazmat/primitives/serialization.py b/src/cryptography/hazmat/primitives/serialization.py\n--- a/src/cryptography/hazmat/primitives/serialization.py\n+++ b/src/cryptography/hazmat/primitives/serialization.py\n@@ -10,6 +10,9 @@\n \n from cryptography import utils\n from cryptography.exceptions import UnsupportedAlgorithm\n+from cryptography.hazmat.primitives.asymmetric.dsa import (\n+ DSAParameterNumbers, DSAPublicNumbers\n+)\n from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers\n \n \n@@ -55,19 +58,23 @@\n key_type = key_parts[0]\n key_body = key_parts[1]\n \n- if not key_type.startswith(b'ssh-'):\n- raise ValueError('SSH-formatted keys must begin with \\'ssh-\\'.')\n+ try:\n+ decoded_data = base64.b64decode(key_body)\n+ except TypeError:\n+ raise ValueError('Key is not in the proper format.')\n \n- if not key_type.startswith(b'ssh-rsa'):\n- raise UnsupportedAlgorithm('Only RSA keys are currently supported.')\n+ if key_type == b'ssh-rsa':\n+ return _load_ssh_rsa_public_key(decoded_data, backend)\n+ elif key_type == b'ssh-dss':\n+ return _load_ssh_dss_public_key(decoded_data, backend)\n+ else:\n+ raise UnsupportedAlgorithm(\n+ 'Only RSA and DSA keys are currently supported.'\n+ )\n \n- return _load_ssh_rsa_public_key(key_body, backend)\n \n-\n-def _load_ssh_rsa_public_key(key_body, backend):\n- data = base64.b64decode(key_body)\n-\n- key_type, rest = _read_next_string(data)\n+def _load_ssh_rsa_public_key(decoded_data, backend):\n+ key_type, rest = _read_next_string(decoded_data)\n e, rest = _read_next_mpint(rest)\n n, rest = _read_next_mpint(rest)\n \n@@ -81,6 +88,26 @@\n return backend.load_rsa_public_numbers(RSAPublicNumbers(e, n))\n \n \n+def _load_ssh_dss_public_key(decoded_data, backend):\n+ key_type, rest = _read_next_string(decoded_data)\n+ p, rest = _read_next_mpint(rest)\n+ q, rest = _read_next_mpint(rest)\n+ g, rest = _read_next_mpint(rest)\n+ y, rest = _read_next_mpint(rest)\n+\n+ if key_type != b'ssh-dss':\n+ raise ValueError(\n+ 'Key header and key body contain different key type values.')\n+\n+ if rest:\n+ raise ValueError('Key body contains extra bytes.')\n+\n+ parameter_numbers = DSAParameterNumbers(p, q, g)\n+ public_numbers = DSAPublicNumbers(y, parameter_numbers)\n+\n+ return backend.load_dsa_public_numbers(public_numbers)\n+\n+\n def _read_next_string(data):\n \"\"\"Retrieves the next RFC 4251 string value from the data.\"\"\"\n str_len, = struct.unpack('>I', data[:4])\n", "issue": "Add support for loading DSA OpenSSH public keys\nShould be straightforward to add support to the existing code.\n\n", "before_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport base64\nimport struct\nimport warnings\n\nfrom cryptography import utils\nfrom cryptography.exceptions import UnsupportedAlgorithm\nfrom cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers\n\n\ndef load_pem_traditional_openssl_private_key(data, password, backend):\n warnings.warn(\n \"load_pem_traditional_openssl_private_key is deprecated and will be \"\n \"removed in a future version, use load_pem_private_key instead.\",\n utils.DeprecatedIn06,\n stacklevel=2\n )\n\n return backend.load_traditional_openssl_pem_private_key(\n data, password\n )\n\n\ndef load_pem_pkcs8_private_key(data, password, backend):\n warnings.warn(\n \"load_pem_pkcs8_private_key is deprecated and will be removed in a \"\n \"future version, use load_pem_private_key instead.\",\n utils.DeprecatedIn06,\n stacklevel=2\n )\n\n return backend.load_pkcs8_pem_private_key(data, password)\n\n\ndef load_pem_private_key(data, password, backend):\n return backend.load_pem_private_key(data, password)\n\n\ndef load_pem_public_key(data, backend):\n return backend.load_pem_public_key(data)\n\n\ndef load_ssh_public_key(data, backend):\n key_parts = data.split(b' ')\n\n if len(key_parts) != 2 and len(key_parts) != 3:\n raise ValueError(\n 'Key is not in the proper format or contains extra data.')\n\n key_type = key_parts[0]\n key_body = key_parts[1]\n\n if not key_type.startswith(b'ssh-'):\n raise ValueError('SSH-formatted keys must begin with \\'ssh-\\'.')\n\n if not key_type.startswith(b'ssh-rsa'):\n raise UnsupportedAlgorithm('Only RSA keys are currently supported.')\n\n return _load_ssh_rsa_public_key(key_body, backend)\n\n\ndef _load_ssh_rsa_public_key(key_body, backend):\n data = base64.b64decode(key_body)\n\n key_type, rest = _read_next_string(data)\n e, rest = _read_next_mpint(rest)\n n, rest = _read_next_mpint(rest)\n\n if key_type != b'ssh-rsa':\n raise ValueError(\n 'Key header and key body contain different key type values.')\n\n if rest:\n raise ValueError('Key body contains extra bytes.')\n\n return backend.load_rsa_public_numbers(RSAPublicNumbers(e, n))\n\n\ndef _read_next_string(data):\n \"\"\"Retrieves the next RFC 4251 string value from the data.\"\"\"\n str_len, = struct.unpack('>I', data[:4])\n return data[4:4 + str_len], data[4 + str_len:]\n\n\ndef _read_next_mpint(data):\n \"\"\"\n Reads the next mpint from the data.\n\n Currently, all mpints are interpreted as unsigned.\n \"\"\"\n mpint_data, rest = _read_next_string(data)\n\n return _int_from_bytes(mpint_data, byteorder='big', signed=False), rest\n\n\nif hasattr(int, \"from_bytes\"):\n _int_from_bytes = int.from_bytes\nelse:\n def _int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n if len(data) % 4 != 0:\n data = (b'\\x00' * (4 - (len(data) % 4))) + data\n\n result = 0\n\n while len(data) > 0:\n digit, = struct.unpack('>I', data[:4])\n result = (result << 32) + digit\n data = data[4:]\n\n return result\n", "path": "src/cryptography/hazmat/primitives/serialization.py"}], "after_files": [{"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. 
See the LICENSE file in the root of this repository\n# for complete details.\n\nfrom __future__ import absolute_import, division, print_function\n\nimport base64\nimport struct\nimport warnings\n\nfrom cryptography import utils\nfrom cryptography.exceptions import UnsupportedAlgorithm\nfrom cryptography.hazmat.primitives.asymmetric.dsa import (\n DSAParameterNumbers, DSAPublicNumbers\n)\nfrom cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicNumbers\n\n\ndef load_pem_traditional_openssl_private_key(data, password, backend):\n warnings.warn(\n \"load_pem_traditional_openssl_private_key is deprecated and will be \"\n \"removed in a future version, use load_pem_private_key instead.\",\n utils.DeprecatedIn06,\n stacklevel=2\n )\n\n return backend.load_traditional_openssl_pem_private_key(\n data, password\n )\n\n\ndef load_pem_pkcs8_private_key(data, password, backend):\n warnings.warn(\n \"load_pem_pkcs8_private_key is deprecated and will be removed in a \"\n \"future version, use load_pem_private_key instead.\",\n utils.DeprecatedIn06,\n stacklevel=2\n )\n\n return backend.load_pkcs8_pem_private_key(data, password)\n\n\ndef load_pem_private_key(data, password, backend):\n return backend.load_pem_private_key(data, password)\n\n\ndef load_pem_public_key(data, backend):\n return backend.load_pem_public_key(data)\n\n\ndef load_ssh_public_key(data, backend):\n key_parts = data.split(b' ')\n\n if len(key_parts) != 2 and len(key_parts) != 3:\n raise ValueError(\n 'Key is not in the proper format or contains extra data.')\n\n key_type = key_parts[0]\n key_body = key_parts[1]\n\n try:\n decoded_data = base64.b64decode(key_body)\n except TypeError:\n raise ValueError('Key is not in the proper format.')\n\n if key_type == b'ssh-rsa':\n return _load_ssh_rsa_public_key(decoded_data, backend)\n elif key_type == b'ssh-dss':\n return _load_ssh_dss_public_key(decoded_data, backend)\n else:\n raise UnsupportedAlgorithm(\n 'Only RSA and DSA keys are currently supported.'\n )\n\n\ndef _load_ssh_rsa_public_key(decoded_data, backend):\n key_type, rest = _read_next_string(decoded_data)\n e, rest = _read_next_mpint(rest)\n n, rest = _read_next_mpint(rest)\n\n if key_type != b'ssh-rsa':\n raise ValueError(\n 'Key header and key body contain different key type values.')\n\n if rest:\n raise ValueError('Key body contains extra bytes.')\n\n return backend.load_rsa_public_numbers(RSAPublicNumbers(e, n))\n\n\ndef _load_ssh_dss_public_key(decoded_data, backend):\n key_type, rest = _read_next_string(decoded_data)\n p, rest = _read_next_mpint(rest)\n q, rest = _read_next_mpint(rest)\n g, rest = _read_next_mpint(rest)\n y, rest = _read_next_mpint(rest)\n\n if key_type != b'ssh-dss':\n raise ValueError(\n 'Key header and key body contain different key type values.')\n\n if rest:\n raise ValueError('Key body contains extra bytes.')\n\n parameter_numbers = DSAParameterNumbers(p, q, g)\n public_numbers = DSAPublicNumbers(y, parameter_numbers)\n\n return backend.load_dsa_public_numbers(public_numbers)\n\n\ndef _read_next_string(data):\n \"\"\"Retrieves the next RFC 4251 string value from the data.\"\"\"\n str_len, = struct.unpack('>I', data[:4])\n return data[4:4 + str_len], data[4 + str_len:]\n\n\ndef _read_next_mpint(data):\n \"\"\"\n Reads the next mpint from the data.\n\n Currently, all mpints are interpreted as unsigned.\n \"\"\"\n mpint_data, rest = _read_next_string(data)\n\n return _int_from_bytes(mpint_data, byteorder='big', signed=False), rest\n\n\nif hasattr(int, \"from_bytes\"):\n _int_from_bytes = 
int.from_bytes\nelse:\n def _int_from_bytes(data, byteorder, signed=False):\n assert byteorder == 'big'\n assert not signed\n\n if len(data) % 4 != 0:\n data = (b'\\x00' * (4 - (len(data) % 4))) + data\n\n result = 0\n\n while len(data) > 0:\n digit, = struct.unpack('>I', data[:4])\n result = (result << 32) + digit\n data = data[4:]\n\n return result\n", "path": "src/cryptography/hazmat/primitives/serialization.py"}]} | 1,406 | 685 |
gh_patches_debug_23749 | rasdani/github-patches | git_diff | SeldonIO__MLServer-301 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tempo example fails when parallel inference is enabled
When parallel inference is enabled, the [outlier example using the Tempo runtime](https://tempo.readthedocs.io/en/latest/examples/outlier/README.html) seems to fail. In particular, it seems that either the `cifar10-service` or the `outlier` containers block the request path and never return a response.
--- END ISSUE ---
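One plausible cause of this kind of silent hang, and the direction taken by the patch recorded later in this row, is the process start method used for the inference pool: a worker created with `fork` inherits the parent's framework state (for example an already-initialised TensorFlow), which can leave it deadlocked, whereas `spawn` starts each worker from a clean interpreter. The following self-contained sketch shows the spawn-based pool pattern; the function and variable names are made up for illustration.
```python
import multiprocessing as mp
from concurrent.futures import ProcessPoolExecutor


def _heavy_predict(x):
    # Stand-in for an inference call that may touch framework state.
    return x * 2


if __name__ == "__main__":
    # 'spawn' gives every worker a fresh interpreter instead of forking the
    # parent process, so workers do not inherit state that only the parent
    # can safely own (thread pools, GPU handles, TensorFlow sessions, ...).
    ctx = mp.get_context("spawn")
    with ProcessPoolExecutor(max_workers=2, mp_context=ctx) as executor:
        print(list(executor.map(_heavy_predict, [1, 2, 3])))
```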
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlserver/parallel.py`
Content:
```
1 import asyncio
2
3 from functools import wraps
4 from concurrent.futures import ProcessPoolExecutor
5 from typing import Any, Coroutine, Callable, Optional
6
7 from .errors import MLServerError
8 from .settings import ModelSettings
9 from .model import MLModel
10 from .types import InferenceRequest, InferenceResponse
11
12 _InferencePoolAttr = "__inference_pool__"
13
14 # NOTE: Workaround for mypy
15 _mp_model: MLModel
16
17
18 class InvalidParallelMethod(MLServerError):
19 def __init__(self, method_name: str, reason: Optional[str] = None):
20 msg = f"Method {method_name} can't be parallelised"
21 if reason:
22 msg += f": {reason}"
23
24 super().__init__(msg)
25
26
27 def _mp_load(model_settings: ModelSettings):
28 """
29 This method is meant to run internally in the multiprocessing workers.
30 The loading needs to run synchronously, since the initializer argument
31 doesn't support coroutines.
32 """
33 # NOTE: The global `_mp_model` variable is shared with the `_mp_predict`
34 # method.
35 # This global variable should only be used within the inference
36 # multiprocessing workers.
37 global _mp_model
38
39 model_class = model_settings.implementation
40 _mp_model = model_class(model_settings) # type: ignore
41 return asyncio.run(_mp_model.load())
42
43
44 def _mp_predict(payload: InferenceRequest) -> InferenceResponse:
45 """
46 This method is meant to run internally in the multiprocessing workers.
47 The prediction needs to run synchronously, since multiprocessing
48 doesn't know how to serialise coroutines.
49 """
50 # NOTE: `_mp_model` is a global variable initialised in the `_mp_load`
51 # method.
52 # This global variable is only to be used within the inference worker
53 # context.
54 global _mp_model
55
56 return asyncio.run(_mp_model.predict(payload))
57
58
59 class InferencePool:
60 """
61 The InferencePool class represents a pool of workers where we can run
62 inference on.
63
64 Under the hood, it's responsible for managing a pool of multiprocessing
65 workers, where the model is loaded.
66 This approach lets MLServer work around the GIL to make sure that inference
67 can occur in parallel across multiple models or instances of a model.
68 """
69
70 def __init__(self, model: MLModel):
71 parallel_workers = model.settings.parallel_workers
72 self._executor = ProcessPoolExecutor(
73 max_workers=parallel_workers,
74 initializer=_mp_load,
75 initargs=(model.settings,),
76 )
77
78 async def predict(self, payload: InferenceRequest) -> InferenceResponse:
79 # What if we serialise payload?
80 loop = asyncio.get_running_loop()
81 return await loop.run_in_executor(self._executor, _mp_predict, payload)
82
83 def __del__(self):
84 self._executor.shutdown(wait=True)
85
86
87 def parallel(f: Callable[[InferenceRequest], Coroutine[Any, Any, InferenceResponse]]):
88 """
89 Decorator to attach to model's methods so that they run in parallel.
90 By default, this will get attached to every model's "inference" method.
91
92 NOTE: At the moment, this method only works with `predict()`.
93 """
94 # TODO: Extend to multiple methods
95 @wraps(f)
96 async def _inner(payload: InferenceRequest) -> InferenceResponse:
97 if not hasattr(f, "__self__"):
98 raise InvalidParallelMethod(f.__name__, reason="method is not bound")
99
100 model = getattr(f, "__self__")
101 if not hasattr(model, _InferencePoolAttr):
102 raise InvalidParallelMethod(
103 f.__name__, reason="inference pool has not been loaded"
104 )
105
106 pool = getattr(model, _InferencePoolAttr)
107 return await pool.predict(payload)
108
109 return _inner
110
111
112 async def load_inference_pool(model: MLModel):
113 if model.settings.parallel_workers == 0:
114 # When parallel workers is set to 0, disable parallel inference
115 return model
116
117 pool = InferencePool(model)
118 setattr(model, _InferencePoolAttr, pool)
119
120 # Decorate predict method
121 setattr(model, "predict", parallel(model.predict))
122
123 return model
124
125
126 async def unload_inference_pool(model: MLModel):
127 pool = getattr(model, _InferencePoolAttr)
128 if not pool:
129 return
130
131 pool.__del__()
132 delattr(model, _InferencePoolAttr)
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mlserver/parallel.py b/mlserver/parallel.py
--- a/mlserver/parallel.py
+++ b/mlserver/parallel.py
@@ -1,4 +1,5 @@
import asyncio
+import multiprocessing as mp
from functools import wraps
from concurrent.futures import ProcessPoolExecutor
@@ -52,7 +53,6 @@
# This global variable is only to be used within the inference worker
# context.
global _mp_model
-
return asyncio.run(_mp_model.predict(payload))
@@ -69,8 +69,14 @@
def __init__(self, model: MLModel):
parallel_workers = model.settings.parallel_workers
+
+ # Use 'spawn' instead of 'fork' to ensure that models are loaded in a
+ # clean environment (e.g. to avoid issues like
+ # https://github.com/tensorflow/tensorflow/issues/8220)
+ ctx = mp.get_context("spawn")
self._executor = ProcessPoolExecutor(
max_workers=parallel_workers,
+ mp_context=ctx,
initializer=_mp_load,
initargs=(model.settings,),
)
| {"golden_diff": "diff --git a/mlserver/parallel.py b/mlserver/parallel.py\n--- a/mlserver/parallel.py\n+++ b/mlserver/parallel.py\n@@ -1,4 +1,5 @@\n import asyncio\n+import multiprocessing as mp\n \n from functools import wraps\n from concurrent.futures import ProcessPoolExecutor\n@@ -52,7 +53,6 @@\n # This global variable is only to be used within the inference worker\n # context.\n global _mp_model\n-\n return asyncio.run(_mp_model.predict(payload))\n \n \n@@ -69,8 +69,14 @@\n \n def __init__(self, model: MLModel):\n parallel_workers = model.settings.parallel_workers\n+\n+ # Use 'spawn' instead of 'fork' to ensure that models are loaded in a\n+ # clean environment (e.g. to avoid issues like\n+ # https://github.com/tensorflow/tensorflow/issues/8220)\n+ ctx = mp.get_context(\"spawn\")\n self._executor = ProcessPoolExecutor(\n max_workers=parallel_workers,\n+ mp_context=ctx,\n initializer=_mp_load,\n initargs=(model.settings,),\n )\n", "issue": "Tempo example fails when parallel inference is enabled\nWhen parallel inference is enabled, the [outlier example using the Tempo runtime](https://tempo.readthedocs.io/en/latest/examples/outlier/README.html)seems to fail. In particular, it seems that either the `cifar10-service` or the `outlier` containers block the request path and never return a response.\n", "before_files": [{"content": "import asyncio\n\nfrom functools import wraps\nfrom concurrent.futures import ProcessPoolExecutor\nfrom typing import Any, Coroutine, Callable, Optional\n\nfrom .errors import MLServerError\nfrom .settings import ModelSettings\nfrom .model import MLModel\nfrom .types import InferenceRequest, InferenceResponse\n\n_InferencePoolAttr = \"__inference_pool__\"\n\n# NOTE: Workaround for mypy\n_mp_model: MLModel\n\n\nclass InvalidParallelMethod(MLServerError):\n def __init__(self, method_name: str, reason: Optional[str] = None):\n msg = f\"Method {method_name} can't be parallelised\"\n if reason:\n msg += f\": {reason}\"\n\n super().__init__(msg)\n\n\ndef _mp_load(model_settings: ModelSettings):\n \"\"\"\n This method is meant to run internally in the multiprocessing workers.\n The loading needs to run synchronously, since the initializer argument\n doesn't support coroutines.\n \"\"\"\n # NOTE: The global `_mp_model` variable is shared with the `_mp_predict`\n # method.\n # This global variable should only be used within the inference\n # multiprocessing workers.\n global _mp_model\n\n model_class = model_settings.implementation\n _mp_model = model_class(model_settings) # type: ignore\n return asyncio.run(_mp_model.load())\n\n\ndef _mp_predict(payload: InferenceRequest) -> InferenceResponse:\n \"\"\"\n This method is meant to run internally in the multiprocessing workers.\n The prediction needs to run synchronously, since multiprocessing\n doesn't know how to serialise coroutines.\n \"\"\"\n # NOTE: `_mp_model` is a global variable initialised in the `_mp_load`\n # method.\n # This global variable is only to be used within the inference worker\n # context.\n global _mp_model\n\n return asyncio.run(_mp_model.predict(payload))\n\n\nclass InferencePool:\n \"\"\"\n The InferencePool class represents a pool of workers where we can run\n inference on.\n\n Under the hood, it's responsible for managing a pool of multiprocessing\n workers, where the model is loaded.\n This approach lets MLServer work around the GIL to make sure that inference\n can occur in parallel across multiple models or instances of a model.\n \"\"\"\n\n def __init__(self, model: MLModel):\n 
parallel_workers = model.settings.parallel_workers\n self._executor = ProcessPoolExecutor(\n max_workers=parallel_workers,\n initializer=_mp_load,\n initargs=(model.settings,),\n )\n\n async def predict(self, payload: InferenceRequest) -> InferenceResponse:\n # What if we serialise payload?\n loop = asyncio.get_running_loop()\n return await loop.run_in_executor(self._executor, _mp_predict, payload)\n\n def __del__(self):\n self._executor.shutdown(wait=True)\n\n\ndef parallel(f: Callable[[InferenceRequest], Coroutine[Any, Any, InferenceResponse]]):\n \"\"\"\n Decorator to attach to model's methods so that they run in parallel.\n By default, this will get attached to every model's \"inference\" method.\n\n NOTE: At the moment, this method only works with `predict()`.\n \"\"\"\n # TODO: Extend to multiple methods\n @wraps(f)\n async def _inner(payload: InferenceRequest) -> InferenceResponse:\n if not hasattr(f, \"__self__\"):\n raise InvalidParallelMethod(f.__name__, reason=\"method is not bound\")\n\n model = getattr(f, \"__self__\")\n if not hasattr(model, _InferencePoolAttr):\n raise InvalidParallelMethod(\n f.__name__, reason=\"inference pool has not been loaded\"\n )\n\n pool = getattr(model, _InferencePoolAttr)\n return await pool.predict(payload)\n\n return _inner\n\n\nasync def load_inference_pool(model: MLModel):\n if model.settings.parallel_workers == 0:\n # When parallel workers is set to 0, disable parallel inference\n return model\n\n pool = InferencePool(model)\n setattr(model, _InferencePoolAttr, pool)\n\n # Decorate predict method\n setattr(model, \"predict\", parallel(model.predict))\n\n return model\n\n\nasync def unload_inference_pool(model: MLModel):\n pool = getattr(model, _InferencePoolAttr)\n if not pool:\n return\n\n pool.__del__()\n delattr(model, _InferencePoolAttr)\n", "path": "mlserver/parallel.py"}], "after_files": [{"content": "import asyncio\nimport multiprocessing as mp\n\nfrom functools import wraps\nfrom concurrent.futures import ProcessPoolExecutor\nfrom typing import Any, Coroutine, Callable, Optional\n\nfrom .errors import MLServerError\nfrom .settings import ModelSettings\nfrom .model import MLModel\nfrom .types import InferenceRequest, InferenceResponse\n\n_InferencePoolAttr = \"__inference_pool__\"\n\n# NOTE: Workaround for mypy\n_mp_model: MLModel\n\n\nclass InvalidParallelMethod(MLServerError):\n def __init__(self, method_name: str, reason: Optional[str] = None):\n msg = f\"Method {method_name} can't be parallelised\"\n if reason:\n msg += f\": {reason}\"\n\n super().__init__(msg)\n\n\ndef _mp_load(model_settings: ModelSettings):\n \"\"\"\n This method is meant to run internally in the multiprocessing workers.\n The loading needs to run synchronously, since the initializer argument\n doesn't support coroutines.\n \"\"\"\n # NOTE: The global `_mp_model` variable is shared with the `_mp_predict`\n # method.\n # This global variable should only be used within the inference\n # multiprocessing workers.\n global _mp_model\n\n model_class = model_settings.implementation\n _mp_model = model_class(model_settings) # type: ignore\n return asyncio.run(_mp_model.load())\n\n\ndef _mp_predict(payload: InferenceRequest) -> InferenceResponse:\n \"\"\"\n This method is meant to run internally in the multiprocessing workers.\n The prediction needs to run synchronously, since multiprocessing\n doesn't know how to serialise coroutines.\n \"\"\"\n # NOTE: `_mp_model` is a global variable initialised in the `_mp_load`\n # method.\n # This global variable is only to be used 
within the inference worker\n # context.\n global _mp_model\n return asyncio.run(_mp_model.predict(payload))\n\n\nclass InferencePool:\n \"\"\"\n The InferencePool class represents a pool of workers where we can run\n inference on.\n\n Under the hood, it's responsible for managing a pool of multiprocessing\n workers, where the model is loaded.\n This approach lets MLServer work around the GIL to make sure that inference\n can occur in parallel across multiple models or instances of a model.\n \"\"\"\n\n def __init__(self, model: MLModel):\n parallel_workers = model.settings.parallel_workers\n\n # Use 'spawn' instead of 'fork' to ensure that models are loaded in a\n # clean environment (e.g. to avoid issues like\n # https://github.com/tensorflow/tensorflow/issues/8220)\n ctx = mp.get_context(\"spawn\")\n self._executor = ProcessPoolExecutor(\n max_workers=parallel_workers,\n mp_context=ctx,\n initializer=_mp_load,\n initargs=(model.settings,),\n )\n\n async def predict(self, payload: InferenceRequest) -> InferenceResponse:\n # What if we serialise payload?\n loop = asyncio.get_running_loop()\n return await loop.run_in_executor(self._executor, _mp_predict, payload)\n\n def __del__(self):\n self._executor.shutdown(wait=True)\n\n\ndef parallel(f: Callable[[InferenceRequest], Coroutine[Any, Any, InferenceResponse]]):\n \"\"\"\n Decorator to attach to model's methods so that they run in parallel.\n By default, this will get attached to every model's \"inference\" method.\n\n NOTE: At the moment, this method only works with `predict()`.\n \"\"\"\n # TODO: Extend to multiple methods\n @wraps(f)\n async def _inner(payload: InferenceRequest) -> InferenceResponse:\n if not hasattr(f, \"__self__\"):\n raise InvalidParallelMethod(f.__name__, reason=\"method is not bound\")\n\n model = getattr(f, \"__self__\")\n if not hasattr(model, _InferencePoolAttr):\n raise InvalidParallelMethod(\n f.__name__, reason=\"inference pool has not been loaded\"\n )\n\n pool = getattr(model, _InferencePoolAttr)\n return await pool.predict(payload)\n\n return _inner\n\n\nasync def load_inference_pool(model: MLModel):\n if model.settings.parallel_workers == 0:\n # When parallel workers is set to 0, disable parallel inference\n return model\n\n pool = InferencePool(model)\n setattr(model, _InferencePoolAttr, pool)\n\n # Decorate predict method\n setattr(model, \"predict\", parallel(model.predict))\n\n return model\n\n\nasync def unload_inference_pool(model: MLModel):\n pool = getattr(model, _InferencePoolAttr)\n if not pool:\n return\n\n pool.__del__()\n delattr(model, _InferencePoolAttr)\n", "path": "mlserver/parallel.py"}]} | 1,588 | 253 |
gh_patches_debug_6029 | rasdani/github-patches | git_diff | pytorch__ignite-768 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
log_message() method fails when `desc` is passed to contrib tqdm
## 🐛 Bug description
If I pass a `desc` argument when instantiating an `ignite.contrib.handlers.ProgressBar`, the calls to its `log_message()` method fail with this exception:
```
TypeError: write() got an unexpected keyword argument 'desc'
```
## Environment
- PyTorch Version: 1.3.1
- Ignite Version: 0.3.0
- OS: Linux
- How you installed Ignite (`conda`, `pip`, source): Conda
- Python version: 3.7
- Any other relevant information:
--- END ISSUE ---
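A minimal reproduction of the failure described above (hypothetical snippet, consistent with the traceback and with the `log_message` implementation shown in the file below, which forwards every stored tqdm kwarg to `tqdm.write()`):
```python
from ignite.contrib.handlers import ProgressBar

# Any tqdm kwarg that tqdm.write() does not understand is forwarded verbatim
# by log_message(), so passing `desc` at construction time breaks logging.
pbar = ProgressBar(persist=True, desc="Training")
pbar.log_message("starting run")  # TypeError: write() got an unexpected keyword argument 'desc'
```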
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ignite/contrib/handlers/tqdm_logger.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import warnings
3
4 import torch
5
6 from ignite.engine import Events
7 from ignite.engine.engine import EventWithFilter
8 from ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler
9
10
11 class ProgressBar(BaseLogger):
12 """
13 TQDM progress bar handler to log training progress and computed metrics.
14
15 Args:
16 persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)
17 bar_format (str, optional): Specify a custom bar string formatting. May impact performance.
18 [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].
19 Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where
20 l_bar='{desc}: {percentage:3.0f}%|' and
21 r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the
22 formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.
23 **tqdm_kwargs: kwargs passed to tqdm progress bar.
24 By default, progress bar description displays "Epoch [5/10]" where 5 is the current epoch and 10 is the
25 number of epochs. If tqdm_kwargs defines `desc`, e.g. "Predictions", than the description is
26 "Predictions [5/10]" if number of epochs is more than one otherwise it is simply "Predictions".
27
28 Examples:
29
30 Simple progress bar
31
32 .. code-block:: python
33
34 trainer = create_supervised_trainer(model, optimizer, loss)
35
36 pbar = ProgressBar()
37 pbar.attach(trainer)
38
39 # Progress bar will looks like
40 # Epoch [2/50]: [64/128] 50%|█████ [06:17<12:34]
41
42 Log output to a file instead of stderr (tqdm's default output)
43
44 .. code-block:: python
45
46 trainer = create_supervised_trainer(model, optimizer, loss)
47
48 log_file = open("output.log", "w")
49 pbar = ProgressBar(file=log_file)
50 pbar.attach(trainer)
51
52 Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`
53 (such as :class:`~ignite.metrics.RunningAverage`)
54
55 .. code-block:: python
56
57 trainer = create_supervised_trainer(model, optimizer, loss)
58
59 RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')
60
61 pbar = ProgressBar()
62 pbar.attach(trainer, ['loss'])
63
64 # Progress bar will looks like
65 # Epoch [2/50]: [64/128] 50%|█████ , loss=0.123 [06:17<12:34]
66
67 Directly attach the engine's output
68
69 .. code-block:: python
70
71 trainer = create_supervised_trainer(model, optimizer, loss)
72
73 pbar = ProgressBar()
74 pbar.attach(trainer, output_transform=lambda x: {'loss': x})
75
76 # Progress bar will looks like
77 # Epoch [2/50]: [64/128] 50%|█████ , loss=0.123 [06:17<12:34]
78
79 Note:
80 When adding attaching the progress bar to an engine, it is recommend that you replace
81 every print operation in the engine's handlers triggered every iteration with
82 ``pbar.log_message`` to guarantee the correct format of the stdout.
83
84 Note:
85 When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,
86 please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.
87 Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set
88 to an empty string value.
89
90 """
91
92 _events_order = [
93 Events.STARTED,
94 Events.EPOCH_STARTED,
95 Events.ITERATION_STARTED,
96 Events.ITERATION_COMPLETED,
97 Events.EPOCH_COMPLETED,
98 Events.COMPLETED
99 ]
100
101 def __init__(self, persist=False,
102 bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',
103 **tqdm_kwargs):
104
105 try:
106 from tqdm.autonotebook import tqdm
107 except ImportError:
108 raise RuntimeError("This contrib module requires tqdm to be installed. "
109 "Please install it with command: \n pip install tqdm")
110
111 self.pbar_cls = tqdm
112 self.pbar = None
113 self.persist = persist
114 self.bar_format = bar_format
115 self.tqdm_kwargs = tqdm_kwargs
116
117 def _reset(self, pbar_total):
118 self.pbar = self.pbar_cls(
119 total=pbar_total,
120 leave=self.persist,
121 bar_format=self.bar_format,
122 initial=1,
123 **self.tqdm_kwargs
124 )
125
126 def _close(self, engine):
127 if self.pbar:
128 self.pbar.close()
129 self.pbar = None
130
131 @staticmethod
132 def _compare_lt(event1, event2):
133 if isinstance(event1, EventWithFilter):
134 event1 = event1.event
135 if isinstance(event2, EventWithFilter):
136 event2 = event2.event
137 i1 = ProgressBar._events_order.index(event1)
138 i2 = ProgressBar._events_order.index(event2)
139 return i1 < i2
140
141 def log_message(self, message):
142 """
143 Logs a message, preserving the progress bar correct output format.
144
145 Args:
146 message (str): string you wish to log.
147 """
148 from tqdm import tqdm
149 tqdm.write(message, **self.tqdm_kwargs)
150
151 def attach(self, engine, metric_names=None, output_transform=None,
152 event_name=Events.ITERATION_COMPLETED,
153 closing_event_name=Events.EPOCH_COMPLETED):
154 """
155 Attaches the progress bar to an engine object.
156
157 Args:
158 engine (Engine): engine object.
159 metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
160 metrics.
161 output_transform (callable, optional): a function to select what you want to print from the engine's
162 output. This function may return either a dictionary with entries in the format of ``{name: value}``,
163 or a single scalar, which will be displayed with the default name `output`.
164 event_name: event's name on which the progress bar advances. Valid events are from
165 :class:`~ignite.engine.Events`.
166 closing_event_name: event's name on which the progress bar is closed. Valid events are from
167 :class:`~ignite.engine.Events`.
168
169 Note: accepted output value types are numbers, 0d and 1d torch tensors and strings
170
171 """
172 desc = self.tqdm_kwargs.get("desc", "Epoch")
173
174 if not isinstance(event_name, (Events, EventWithFilter)):
175 raise ValueError("Logging event should be only `ignite.engine.Events`")
176
177 if isinstance(closing_event_name, EventWithFilter):
178 raise ValueError("Closing event should not use any event filter")
179
180 if not self._compare_lt(event_name, closing_event_name):
181 raise ValueError("Logging event {} should be called before closing event {}"
182 .format(event_name, closing_event_name))
183
184 log_handler = _OutputHandler(desc, metric_names, output_transform,
185 closing_event_name=closing_event_name)
186 # if event_name is EventWithFilter, filter is passed here
187 super(ProgressBar, self).attach(engine, log_handler, event_name)
188 engine.add_event_handler(closing_event_name, self._close)
189
190
191 class _OutputHandler(BaseOutputHandler):
192 """Helper handler to log engine's output and/or metrics
193
194 Args:
195 description (str): progress bar description.
196 metric_names (list of str, optional): list of metric names to plot or a string "all" to plot all available
197 metrics.
198 output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.
199 For example, `output_transform = lambda output: output`
200 This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot
201 with corresponding keys.
202 closing_event_name: event's name on which the progress bar is closed. Valid events are from
203 :class:`~ignite.engine.Events` or any `event_name` added by
204 :meth:`~ignite.engine.Engine.register_events`.
205
206 """
207
208 def __init__(self, description, metric_names=None, output_transform=None,
209 closing_event_name=Events.EPOCH_COMPLETED):
210 if metric_names is None and output_transform is None:
211 # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler
212 metric_names = []
213 super(_OutputHandler, self).__init__(description, metric_names, output_transform,
214 another_engine=None, global_step_transform=None)
215 self.closing_event_name = closing_event_name
216
217 @staticmethod
218 def get_max_number_events(event_name, engine):
219 if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):
220 return len(engine.state.dataloader)
221 if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):
222 return engine.state.max_epochs
223 return 1
224
225 def __call__(self, engine, logger, event_name):
226
227 pbar_total = self.get_max_number_events(event_name, engine)
228 if logger.pbar is None:
229 logger._reset(pbar_total=pbar_total)
230
231 desc = self.tag
232 max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)
233 if max_num_of_closing_events > 1:
234 global_step = engine.state.get_event_attrib_value(self.closing_event_name)
235 desc += " [{}/{}]".format(global_step, max_num_of_closing_events)
236 logger.pbar.set_description(desc)
237
238 metrics = self._setup_output_metrics(engine)
239
240 rendered_metrics = {}
241 for key, value in metrics.items():
242 if isinstance(value, torch.Tensor):
243 if value.ndimension() == 0:
244 rendered_metrics[key] = value.item()
245 elif value.ndimension() == 1:
246 for i, v in enumerate(value):
247 k = "{}_{}".format(key, i)
248 rendered_metrics[k] = v.item()
249 else:
250 warnings.warn("ProgressBar can not log "
251 "tensor with {} dimensions".format(value.ndimension()))
252 else:
253 rendered_metrics[key] = value
254
255 if rendered_metrics:
256 logger.pbar.set_postfix(**rendered_metrics)
257
258 global_step = engine.state.get_event_attrib_value(event_name)
259 global_step = (global_step - 1) % pbar_total + 1
260 logger.pbar.update(global_step - logger.pbar.n)
261
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py
--- a/ignite/contrib/handlers/tqdm_logger.py
+++ b/ignite/contrib/handlers/tqdm_logger.py
@@ -146,7 +146,8 @@
message (str): string you wish to log.
"""
from tqdm import tqdm
- tqdm.write(message, **self.tqdm_kwargs)
+
+ tqdm.write(message, file=self.tqdm_kwargs.get("file", None))
def attach(self, engine, metric_names=None, output_transform=None,
event_name=Events.ITERATION_COMPLETED,
| {"golden_diff": "diff --git a/ignite/contrib/handlers/tqdm_logger.py b/ignite/contrib/handlers/tqdm_logger.py\n--- a/ignite/contrib/handlers/tqdm_logger.py\n+++ b/ignite/contrib/handlers/tqdm_logger.py\n@@ -146,7 +146,8 @@\n message (str): string you wish to log.\n \"\"\"\n from tqdm import tqdm\n- tqdm.write(message, **self.tqdm_kwargs)\n+\n+ tqdm.write(message, file=self.tqdm_kwargs.get(\"file\", None))\n \n def attach(self, engine, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n", "issue": "log_message() method fails `desc` is passed to contrib tqdm\n## \ud83d\udc1b Bug description\r\n\r\nIf I pass a `desc` argument when instantiating a `ignite.contrib.handlers.ProgressBar` the calls to its `log_message()` method fail with this exception:\r\n\r\n```\r\nTypeError: write() got an unexpected keyword argument 'desc'\r\n```\r\n\r\n## Environment\r\n\r\n - PyTorch Version: 1.3.1\r\n - Ignite Version: 0.3.0\r\n - OS: Linux\r\n - How you installed Ignite (`conda`, `pip`, source): Conda\r\n - Python version: 3.7\r\n - Any other relevant information:\r\n\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport warnings\n\nimport torch\n\nfrom ignite.engine import Events\nfrom ignite.engine.engine import EventWithFilter\nfrom ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler\n\n\nclass ProgressBar(BaseLogger):\n \"\"\"\n TQDM progress bar handler to log training progress and computed metrics.\n\n Args:\n persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)\n bar_format (str, optional): Specify a custom bar string formatting. May impact performance.\n [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].\n Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the\n formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.\n **tqdm_kwargs: kwargs passed to tqdm progress bar.\n By default, progress bar description displays \"Epoch [5/10]\" where 5 is the current epoch and 10 is the\n number of epochs. If tqdm_kwargs defines `desc`, e.g. \"Predictions\", than the description is\n \"Predictions [5/10]\" if number of epochs is more than one otherwise it is simply \"Predictions\".\n\n Examples:\n\n Simple progress bar\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer)\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 [06:17<12:34]\n\n Log output to a file instead of stderr (tqdm's default output)\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n log_file = open(\"output.log\", \"w\")\n pbar = ProgressBar(file=log_file)\n pbar.attach(trainer)\n\n Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`\n (such as :class:`~ignite.metrics.RunningAverage`)\n\n .. 
code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')\n\n pbar = ProgressBar()\n pbar.attach(trainer, ['loss'])\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n\n Directly attach the engine's output\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer, output_transform=lambda x: {'loss': x})\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n\n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n every print operation in the engine's handlers triggered every iteration with\n ``pbar.log_message`` to guarantee the correct format of the stdout.\n\n Note:\n When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. For correct rendering,\n please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.\n Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set\n to an empty string value.\n\n \"\"\"\n\n _events_order = [\n Events.STARTED,\n Events.EPOCH_STARTED,\n Events.ITERATION_STARTED,\n Events.ITERATION_COMPLETED,\n Events.EPOCH_COMPLETED,\n Events.COMPLETED\n ]\n\n def __init__(self, persist=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',\n **tqdm_kwargs):\n\n try:\n from tqdm.autonotebook import tqdm\n except ImportError:\n raise RuntimeError(\"This contrib module requires tqdm to be installed. \"\n \"Please install it with command: \\n pip install tqdm\")\n\n self.pbar_cls = tqdm\n self.pbar = None\n self.persist = persist\n self.bar_format = bar_format\n self.tqdm_kwargs = tqdm_kwargs\n\n def _reset(self, pbar_total):\n self.pbar = self.pbar_cls(\n total=pbar_total,\n leave=self.persist,\n bar_format=self.bar_format,\n initial=1,\n **self.tqdm_kwargs\n )\n\n def _close(self, engine):\n if self.pbar:\n self.pbar.close()\n self.pbar = None\n\n @staticmethod\n def _compare_lt(event1, event2):\n if isinstance(event1, EventWithFilter):\n event1 = event1.event\n if isinstance(event2, EventWithFilter):\n event2 = event2.event\n i1 = ProgressBar._events_order.index(event1)\n i2 = ProgressBar._events_order.index(event2)\n return i1 < i2\n\n def log_message(self, message):\n \"\"\"\n Logs a message, preserving the progress bar correct output format.\n\n Args:\n message (str): string you wish to log.\n \"\"\"\n from tqdm import tqdm\n tqdm.write(message, **self.tqdm_kwargs)\n\n def attach(self, engine, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n \"\"\"\n Attaches the progress bar to an engine object.\n\n Args:\n engine (Engine): engine object.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): a function to select what you want to print from the engine's\n output. This function may return either a dictionary with entries in the format of ``{name: value}``,\n or a single scalar, which will be displayed with the default name `output`.\n event_name: event's name on which the progress bar advances. 
Valid events are from\n :class:`~ignite.engine.Events`.\n closing_event_name: event's name on which the progress bar is closed. Valid events are from\n :class:`~ignite.engine.Events`.\n\n Note: accepted output value types are numbers, 0d and 1d torch tensors and strings\n\n \"\"\"\n desc = self.tqdm_kwargs.get(\"desc\", \"Epoch\")\n\n if not isinstance(event_name, (Events, EventWithFilter)):\n raise ValueError(\"Logging event should be only `ignite.engine.Events`\")\n\n if isinstance(closing_event_name, EventWithFilter):\n raise ValueError(\"Closing event should not use any event filter\")\n\n if not self._compare_lt(event_name, closing_event_name):\n raise ValueError(\"Logging event {} should be called before closing event {}\"\n .format(event_name, closing_event_name))\n\n log_handler = _OutputHandler(desc, metric_names, output_transform,\n closing_event_name=closing_event_name)\n # if event_name is EventWithFilter, filter is passed here\n super(ProgressBar, self).attach(engine, log_handler, event_name)\n engine.add_event_handler(closing_event_name, self._close)\n\n\nclass _OutputHandler(BaseOutputHandler):\n \"\"\"Helper handler to log engine's output and/or metrics\n\n Args:\n description (str): progress bar description.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.\n For example, `output_transform = lambda output: output`\n This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot\n with corresponding keys.\n closing_event_name: event's name on which the progress bar is closed. Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n\n \"\"\"\n\n def __init__(self, description, metric_names=None, output_transform=None,\n closing_event_name=Events.EPOCH_COMPLETED):\n if metric_names is None and output_transform is None:\n # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler\n metric_names = []\n super(_OutputHandler, self).__init__(description, metric_names, output_transform,\n another_engine=None, global_step_transform=None)\n self.closing_event_name = closing_event_name\n\n @staticmethod\n def get_max_number_events(event_name, engine):\n if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):\n return len(engine.state.dataloader)\n if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):\n return engine.state.max_epochs\n return 1\n\n def __call__(self, engine, logger, event_name):\n\n pbar_total = self.get_max_number_events(event_name, engine)\n if logger.pbar is None:\n logger._reset(pbar_total=pbar_total)\n\n desc = self.tag\n max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)\n if max_num_of_closing_events > 1:\n global_step = engine.state.get_event_attrib_value(self.closing_event_name)\n desc += \" [{}/{}]\".format(global_step, max_num_of_closing_events)\n logger.pbar.set_description(desc)\n\n metrics = self._setup_output_metrics(engine)\n\n rendered_metrics = {}\n for key, value in metrics.items():\n if isinstance(value, torch.Tensor):\n if value.ndimension() == 0:\n rendered_metrics[key] = value.item()\n elif value.ndimension() == 1:\n for i, v in enumerate(value):\n k = \"{}_{}\".format(key, i)\n rendered_metrics[k] = v.item()\n else:\n 
warnings.warn(\"ProgressBar can not log \"\n \"tensor with {} dimensions\".format(value.ndimension()))\n else:\n rendered_metrics[key] = value\n\n if rendered_metrics:\n logger.pbar.set_postfix(**rendered_metrics)\n\n global_step = engine.state.get_event_attrib_value(event_name)\n global_step = (global_step - 1) % pbar_total + 1\n logger.pbar.update(global_step - logger.pbar.n)\n", "path": "ignite/contrib/handlers/tqdm_logger.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport warnings\n\nimport torch\n\nfrom ignite.engine import Events\nfrom ignite.engine.engine import EventWithFilter\nfrom ignite.contrib.handlers.base_logger import BaseLogger, BaseOutputHandler\n\n\nclass ProgressBar(BaseLogger):\n \"\"\"\n TQDM progress bar handler to log training progress and computed metrics.\n\n Args:\n persist (bool, optional): set to ``True`` to persist the progress bar after completion (default = ``False``)\n bar_format (str, optional): Specify a custom bar string formatting. May impact performance.\n [default: '{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]'].\n Set to ``None`` to use ``tqdm`` default bar formatting: '{l_bar}{bar}{r_bar}', where\n l_bar='{desc}: {percentage:3.0f}%|' and\n r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}{postfix}]'. For more details on the\n formatting, see `tqdm docs <https://tqdm.github.io/docs/tqdm/>`_.\n **tqdm_kwargs: kwargs passed to tqdm progress bar.\n By default, progress bar description displays \"Epoch [5/10]\" where 5 is the current epoch and 10 is the\n number of epochs. If tqdm_kwargs defines `desc`, e.g. \"Predictions\", than the description is\n \"Predictions [5/10]\" if number of epochs is more than one otherwise it is simply \"Predictions\".\n\n Examples:\n\n Simple progress bar\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer)\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 [06:17<12:34]\n\n Log output to a file instead of stderr (tqdm's default output)\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n log_file = open(\"output.log\", \"w\")\n pbar = ProgressBar(file=log_file)\n pbar.attach(trainer)\n\n Attach metrics that already have been computed at :attr:`~ignite.engine.Events.ITERATION_COMPLETED`\n (such as :class:`~ignite.metrics.RunningAverage`)\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n RunningAverage(output_transform=lambda x: x).attach(trainer, 'loss')\n\n pbar = ProgressBar()\n pbar.attach(trainer, ['loss'])\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n\n Directly attach the engine's output\n\n .. code-block:: python\n\n trainer = create_supervised_trainer(model, optimizer, loss)\n\n pbar = ProgressBar()\n pbar.attach(trainer, output_transform=lambda x: {'loss': x})\n\n # Progress bar will looks like\n # Epoch [2/50]: [64/128] 50%|\u2588\u2588\u2588\u2588\u2588 , loss=0.123 [06:17<12:34]\n\n Note:\n When adding attaching the progress bar to an engine, it is recommend that you replace\n every print operation in the engine's handlers triggered every iteration with\n ``pbar.log_message`` to guarantee the correct format of the stdout.\n\n Note:\n When using inside jupyter notebook, `ProgressBar` automatically uses `tqdm_notebook`. 
For correct rendering,\n please install `ipywidgets <https://ipywidgets.readthedocs.io/en/stable/user_install.html#installation>`_.\n Due to `tqdm notebook bugs <https://github.com/tqdm/tqdm/issues/594>`_, bar format may be needed to be set\n to an empty string value.\n\n \"\"\"\n\n _events_order = [\n Events.STARTED,\n Events.EPOCH_STARTED,\n Events.ITERATION_STARTED,\n Events.ITERATION_COMPLETED,\n Events.EPOCH_COMPLETED,\n Events.COMPLETED\n ]\n\n def __init__(self, persist=False,\n bar_format='{desc}[{n_fmt}/{total_fmt}] {percentage:3.0f}%|{bar}{postfix} [{elapsed}<{remaining}]',\n **tqdm_kwargs):\n\n try:\n from tqdm.autonotebook import tqdm\n except ImportError:\n raise RuntimeError(\"This contrib module requires tqdm to be installed. \"\n \"Please install it with command: \\n pip install tqdm\")\n\n self.pbar_cls = tqdm\n self.pbar = None\n self.persist = persist\n self.bar_format = bar_format\n self.tqdm_kwargs = tqdm_kwargs\n\n def _reset(self, pbar_total):\n self.pbar = self.pbar_cls(\n total=pbar_total,\n leave=self.persist,\n bar_format=self.bar_format,\n initial=1,\n **self.tqdm_kwargs\n )\n\n def _close(self, engine):\n if self.pbar:\n self.pbar.close()\n self.pbar = None\n\n @staticmethod\n def _compare_lt(event1, event2):\n if isinstance(event1, EventWithFilter):\n event1 = event1.event\n if isinstance(event2, EventWithFilter):\n event2 = event2.event\n i1 = ProgressBar._events_order.index(event1)\n i2 = ProgressBar._events_order.index(event2)\n return i1 < i2\n\n def log_message(self, message):\n \"\"\"\n Logs a message, preserving the progress bar correct output format.\n\n Args:\n message (str): string you wish to log.\n \"\"\"\n from tqdm import tqdm\n\n tqdm.write(message, file=self.tqdm_kwargs.get(\"file\", None))\n\n def attach(self, engine, metric_names=None, output_transform=None,\n event_name=Events.ITERATION_COMPLETED,\n closing_event_name=Events.EPOCH_COMPLETED):\n \"\"\"\n Attaches the progress bar to an engine object.\n\n Args:\n engine (Engine): engine object.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): a function to select what you want to print from the engine's\n output. This function may return either a dictionary with entries in the format of ``{name: value}``,\n or a single scalar, which will be displayed with the default name `output`.\n event_name: event's name on which the progress bar advances. Valid events are from\n :class:`~ignite.engine.Events`.\n closing_event_name: event's name on which the progress bar is closed. 
Valid events are from\n :class:`~ignite.engine.Events`.\n\n Note: accepted output value types are numbers, 0d and 1d torch tensors and strings\n\n \"\"\"\n desc = self.tqdm_kwargs.get(\"desc\", \"Epoch\")\n\n if not isinstance(event_name, (Events, EventWithFilter)):\n raise ValueError(\"Logging event should be only `ignite.engine.Events`\")\n\n if isinstance(closing_event_name, EventWithFilter):\n raise ValueError(\"Closing event should not use any event filter\")\n\n if not self._compare_lt(event_name, closing_event_name):\n raise ValueError(\"Logging event {} should be called before closing event {}\"\n .format(event_name, closing_event_name))\n\n log_handler = _OutputHandler(desc, metric_names, output_transform,\n closing_event_name=closing_event_name)\n # if event_name is EventWithFilter, filter is passed here\n super(ProgressBar, self).attach(engine, log_handler, event_name)\n engine.add_event_handler(closing_event_name, self._close)\n\n\nclass _OutputHandler(BaseOutputHandler):\n \"\"\"Helper handler to log engine's output and/or metrics\n\n Args:\n description (str): progress bar description.\n metric_names (list of str, optional): list of metric names to plot or a string \"all\" to plot all available\n metrics.\n output_transform (callable, optional): output transform function to prepare `engine.state.output` as a number.\n For example, `output_transform = lambda output: output`\n This function can also return a dictionary, e.g `{'loss': loss1, 'another_loss': loss2}` to label the plot\n with corresponding keys.\n closing_event_name: event's name on which the progress bar is closed. Valid events are from\n :class:`~ignite.engine.Events` or any `event_name` added by\n :meth:`~ignite.engine.Engine.register_events`.\n\n \"\"\"\n\n def __init__(self, description, metric_names=None, output_transform=None,\n closing_event_name=Events.EPOCH_COMPLETED):\n if metric_names is None and output_transform is None:\n # This helps to avoid 'Either metric_names or output_transform should be defined' of BaseOutputHandler\n metric_names = []\n super(_OutputHandler, self).__init__(description, metric_names, output_transform,\n another_engine=None, global_step_transform=None)\n self.closing_event_name = closing_event_name\n\n @staticmethod\n def get_max_number_events(event_name, engine):\n if event_name in (Events.ITERATION_STARTED, Events.ITERATION_COMPLETED):\n return len(engine.state.dataloader)\n if event_name in (Events.EPOCH_STARTED, Events.EPOCH_COMPLETED):\n return engine.state.max_epochs\n return 1\n\n def __call__(self, engine, logger, event_name):\n\n pbar_total = self.get_max_number_events(event_name, engine)\n if logger.pbar is None:\n logger._reset(pbar_total=pbar_total)\n\n desc = self.tag\n max_num_of_closing_events = self.get_max_number_events(self.closing_event_name, engine)\n if max_num_of_closing_events > 1:\n global_step = engine.state.get_event_attrib_value(self.closing_event_name)\n desc += \" [{}/{}]\".format(global_step, max_num_of_closing_events)\n logger.pbar.set_description(desc)\n\n metrics = self._setup_output_metrics(engine)\n\n rendered_metrics = {}\n for key, value in metrics.items():\n if isinstance(value, torch.Tensor):\n if value.ndimension() == 0:\n rendered_metrics[key] = value.item()\n elif value.ndimension() == 1:\n for i, v in enumerate(value):\n k = \"{}_{}\".format(key, i)\n rendered_metrics[k] = v.item()\n else:\n warnings.warn(\"ProgressBar can not log \"\n \"tensor with {} dimensions\".format(value.ndimension()))\n else:\n rendered_metrics[key] = value\n\n 
if rendered_metrics:\n logger.pbar.set_postfix(**rendered_metrics)\n\n global_step = engine.state.get_event_attrib_value(event_name)\n global_step = (global_step - 1) % pbar_total + 1\n logger.pbar.update(global_step - logger.pbar.n)\n", "path": "ignite/contrib/handlers/tqdm_logger.py"}]} | 3,519 | 151 |
gh_patches_debug_63843 | rasdani/github-patches | git_diff | WeblateOrg__weblate-9567 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Microsoft automatic translation fails for Serbian ("sr")
### Describe the issue
For the locale Serbian ("sr"), the automatic translation with Microsoft Translator does not work. There are no "Automatic suggestions", and the "Automatic translation" tool does not get any texts.
### I already tried
- [X] I've read and searched [the documentation](https://docs.weblate.org/).
- [X] I've searched for similar issues in this repository.
### Steps to reproduce the behavior
1. Add Microsoft Translator to Weblate
2. Create a project and component with the language "Serbian" - "sr"
3. Go to `/translate/{project}/{component}/sr/?q=state:<translated` and see that no texts are suggested
### Expected behavior
Automatic suggestions should be shown for Serbian.
### Screenshots
_No response_
### Exception traceback
_No response_
### How do you run Weblate?
Docker container
### Weblate versions
* Weblate: 4.18.2
* Django: 4.2.2
* siphashc: 2.1
* translate-toolkit: 3.9.2
* lxml: 4.9.2
* Pillow: 9.5.0
* nh3: 0.2.13
* python-dateutil: 2.8.2
* social-auth-core: 4.4.2
* social-auth-app-django: 5.2.0
* django-crispy-forms: 2.0
* oauthlib: 3.2.2
* django-compressor: 4.4
* djangorestframework: 3.14.0
* django-filter: 23.2
* django-appconf: 1.0.5
* user-agents: 2.2.0
* filelock: 3.12.2
* rapidfuzz: 3.1.1
* openpyxl: 3.1.2
* celery: 5.3.1
* django-celery-beat: 2.5.0
* kombu: 5.3.1
* translation-finder: 2.15
* weblate-language-data: 2023.5
* html2text: 2020.1.16
* pycairo: 1.24.0
* PyGObject: 3.44.1
* diff-match-patch: 20230430
* requests: 2.31.0
* django-redis: 5.3.0
* hiredis: 2.2.3
* sentry-sdk: 1.26.0
* Cython: 0.29.35
* misaka: 2.1.1
* GitPython: 3.1.31
* borgbackup: 1.2.4
* pyparsing: 3.0.9
* pyahocorasick: 2.0.0
* python-redis-lock: 4.0.0
* charset-normalizer: 3.1.0
* Python: 3.11.4
* Git: 2.30.2
* psycopg2: 2.9.6
* phply: 1.2.6
* ruamel.yaml: 0.17.32
* tesserocr: 2.6.0
* boto3: 1.26.164
* zeep: 4.2.1
* aeidon: 1.12
* iniparse: 0.5
* mysqlclient: 2.2.0
* Mercurial: 6.4.5
* git-svn: 2.30.2
* git-review: 2.3.1
* Redis server: 6.2.12
* PostgreSQL server: 13.10
* Database backends: django.db.backends.postgresql
* Cache backends: default:RedisCache, avatar:FileBasedCache
* Email setup: django.core.mail.backends.smtp.EmailBackend: mailz.porsche.co.at
* OS encoding: filesystem=utf-8, default=utf-8
* Celery: redis://localhost:6379/1, redis://localhost:6379/1, regular
* Platform: Linux 3.10.0-1160.90.1.el7.x86_64 (x86_64)
### Weblate deploy checks
```shell
System check identified some issues:
INFOS:
?: (weblate.I021) Error collection is not set up, it is highly recommended for production use
HINT: https://docs.weblate.org/en/latest/admin/install.html#collecting-errors
System check identified 1 issue (1 silenced).
```
### Additional context
It seems that Microsoft Translator treats "sr" as "sr-Latn".
For example:
```
POST https://api-eur.cognitive.microsofttranslator.com/translate?api-version=3.0&from=en&to=sr
Content-Type: application/json
[{"Text":"Hello World!"}]
```
gets the answer
```
[
{
"translations": [
{
"text": "Zdravo svete!",
"to": "sr-Latn"
}
]
}
]
```
I think this has to be added to the `language_map`: https://github.com/WeblateOrg/weblate/blob/5674acc39e21ea092c0d2fba89569b802315595a/weblate/machinery/microsoft.py#L26
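
For illustration, here is a minimal sketch of that proposed addition (the surrounding entries are copied from `weblate/machinery/microsoft.py` as shown below; only the `"sr"` line is new):

```python
# Sketch of the proposed language_map in weblate/machinery/microsoft.py
language_map = {
    "zh-hant": "zh-Hant",
    "zh-hans": "zh-Hans",
    "zh-tw": "zh-Hant",
    "zh-cn": "zh-Hans",
    "tlh": "tlh-Latn",
    "tlh-qaak": "tlh-Piqd",
    "nb": "no",
    "bs-latn": "bs-Latn",
    "sr": "sr-Latn",  # proposed: the API answers plain "sr" requests with "sr-Latn"
    "sr-latn": "sr-Latn",
    "sr-cyrl": "sr-Cyrl",
    "mn": "mn-Mong",
}
```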
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `weblate/machinery/microsoft.py`
Content:
```
1 # Copyright © Michal Čihař <[email protected]>
2 #
3 # SPDX-License-Identifier: GPL-3.0-or-later
4
5 from __future__ import annotations
6
7 from datetime import timedelta
8
9 from django.conf import settings
10 from django.utils import timezone
11
12 from .base import MachineTranslation, MachineTranslationError
13 from .forms import MicrosoftMachineryForm
14
15 TOKEN_URL = "https://{0}{1}/sts/v1.0/issueToken?Subscription-Key={2}"
16 TOKEN_EXPIRY = timedelta(minutes=9)
17
18
19 class MicrosoftCognitiveTranslation(MachineTranslation):
20 """Microsoft Cognitive Services Translator API support."""
21
22 name = "Microsoft Translator"
23 max_score = 90
24 settings_form = MicrosoftMachineryForm
25
26 language_map = {
27 "zh-hant": "zh-Hant",
28 "zh-hans": "zh-Hans",
29 "zh-tw": "zh-Hant",
30 "zh-cn": "zh-Hans",
31 "tlh": "tlh-Latn",
32 "tlh-qaak": "tlh-Piqd",
33 "nb": "no",
34 "bs-latn": "bs-Latn",
35 "sr-latn": "sr-Latn",
36 "sr-cyrl": "sr-Cyrl",
37 "mn": "mn-Mong",
38 }
39
40 def __init__(self, settings: dict[str, str]):
41 """Check configuration."""
42 super().__init__(settings)
43 self._access_token = None
44 self._token_expiry = None
45
46 # check settings for Microsoft region prefix
47 region = "" if not self.settings["region"] else f"{self.settings['region']}."
48
49 self._cognitive_token_url = TOKEN_URL.format(
50 region,
51 self.settings["endpoint_url"],
52 self.settings["key"],
53 )
54
55 @staticmethod
56 def migrate_settings():
57 return {
58 "region": settings.MT_MICROSOFT_REGION,
59 "endpoint_url": settings.MT_MICROSOFT_ENDPOINT_URL,
60 "base_url": settings.MT_MICROSOFT_BASE_URL,
61 "key": settings.MT_MICROSOFT_COGNITIVE_KEY,
62 }
63
64 def get_url(self, suffix):
65 return f"https://{self.settings['base_url']}/{suffix}"
66
67 def is_token_expired(self):
68 """Check whether token is about to expire."""
69 return self._token_expiry <= timezone.now()
70
71 def get_authentication(self):
72 """Hook for backends to allow add authentication headers to request."""
73 return {"Authorization": f"Bearer {self.access_token}"}
74
75 @property
76 def access_token(self):
77 """Obtain and caches access token."""
78 if self._access_token is None or self.is_token_expired():
79 self._access_token = self.request(
80 "post", self._cognitive_token_url, skip_auth=True
81 ).text
82 self._token_expiry = timezone.now() + TOKEN_EXPIRY
83
84 return self._access_token
85
86 def map_language_code(self, code):
87 """Convert language to service specific code."""
88 return super().map_language_code(code).replace("_", "-")
89
90 def download_languages(self):
91 """
92 Download list of supported languages from a service.
93
94 Example of the response:
95
96 ['af', 'ar', 'bs-Latn', 'bg', 'ca', 'zh-CHS', 'zh-CHT', 'yue', 'hr', 'cs', 'da',
97 'nl', 'en', 'et', 'fj', 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww',
98 'h', 'id', 'it', 'ja', 'sw', 'tlh', 'tlh-Qaak', 'ko', 'lv', 'lt', 'mg', 'ms',
99 'mt', 'yua', 'no', 'otq', 'fa', 'pl', 'pt', 'ro', 'r', 'sm', 'sr-Cyrl',
100 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'th', 'to', 'tr', 'uk', 'ur', 'vi',
101 'cy']
102 """
103 response = self.request(
104 "get", self.get_url("languages"), params={"api-version": "3.0"}
105 )
106 # Microsoft tends to use utf-8-sig instead of plain utf-8
107 response.encoding = response.apparent_encoding
108 payload = response.json()
109
110 # We should get an object, string usually means an error
111 if isinstance(payload, str):
112 raise MachineTranslationError(payload)
113
114 return payload["translation"].keys()
115
116 def download_translations(
117 self,
118 source,
119 language,
120 text: str,
121 unit,
122 user,
123 threshold: int = 75,
124 ):
125 """Download list of possible translations from a service."""
126 args = {
127 "api-version": "3.0",
128 "from": source,
129 "to": language,
130 "category": "general",
131 }
132 response = self.request(
133 "post", self.get_url("translate"), params=args, json=[{"Text": text[:5000]}]
134 )
135 # Microsoft tends to use utf-8-sig instead of plain utf-8
136 response.encoding = "utf-8-sig"
137 payload = response.json()
138 yield {
139 "text": payload[0]["translations"][0]["text"],
140 "quality": self.max_score,
141 "service": self.name,
142 "source": text,
143 }
144
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/weblate/machinery/microsoft.py b/weblate/machinery/microsoft.py
--- a/weblate/machinery/microsoft.py
+++ b/weblate/machinery/microsoft.py
@@ -32,6 +32,7 @@
"tlh-qaak": "tlh-Piqd",
"nb": "no",
"bs-latn": "bs-Latn",
+ "sr": "sr-Latn",
"sr-latn": "sr-Latn",
"sr-cyrl": "sr-Cyrl",
"mn": "mn-Mong",
| {"golden_diff": "diff --git a/weblate/machinery/microsoft.py b/weblate/machinery/microsoft.py\n--- a/weblate/machinery/microsoft.py\n+++ b/weblate/machinery/microsoft.py\n@@ -32,6 +32,7 @@\n \"tlh-qaak\": \"tlh-Piqd\",\n \"nb\": \"no\",\n \"bs-latn\": \"bs-Latn\",\n+ \"sr\": \"sr-Latn\",\n \"sr-latn\": \"sr-Latn\",\n \"sr-cyrl\": \"sr-Cyrl\",\n \"mn\": \"mn-Mong\",\n", "issue": "Microsoft automatic translation fails for Serbian (\"sr\")\n### Describe the issue\n\nFor the locale Serbian - \"sr\" the automatic translation with Microsoft Translator does not work. There are no \"Automatic suggestions\" and the \"Automatic translation\" tool does not get any texts.\n\n### I already tried\n\n- [X] I've read and searched [the documentation](https://docs.weblate.org/).\n- [X] I've searched for similar issues in this repository.\n\n### Steps to reproduce the behavior\n\n1. Add Microsoft Translator to Weblate\r\n2. Create a project and component with the language \"Serbian\" - \"sr\"\r\n3. Go to `/translate/{project}/{component}/sr/?q=state:<translated` and see that no texts are suggested\n\n### Expected behavior\n\nAutomatic suggestions should be shown for Serbian.\n\n### Screenshots\n\n_No response_\n\n### Exception traceback\n\n_No response_\n\n### How do you run Weblate?\n\nDocker container\n\n### Weblate versions\n\n * Weblate: 4.18.2\r\n * Django: 4.2.2\r\n * siphashc: 2.1\r\n * translate-toolkit: 3.9.2\r\n * lxml: 4.9.2\r\n * Pillow: 9.5.0\r\n * nh3: 0.2.13\r\n * python-dateutil: 2.8.2\r\n * social-auth-core: 4.4.2\r\n * social-auth-app-django: 5.2.0\r\n * django-crispy-forms: 2.0\r\n * oauthlib: 3.2.2\r\n * django-compressor: 4.4\r\n * djangorestframework: 3.14.0\r\n * django-filter: 23.2\r\n * django-appconf: 1.0.5\r\n * user-agents: 2.2.0\r\n * filelock: 3.12.2\r\n * rapidfuzz: 3.1.1\r\n * openpyxl: 3.1.2\r\n * celery: 5.3.1\r\n * django-celery-beat: 2.5.0\r\n * kombu: 5.3.1\r\n * translation-finder: 2.15\r\n * weblate-language-data: 2023.5\r\n * html2text: 2020.1.16\r\n * pycairo: 1.24.0\r\n * PyGObject: 3.44.1\r\n * diff-match-patch: 20230430\r\n * requests: 2.31.0\r\n * django-redis: 5.3.0\r\n * hiredis: 2.2.3\r\n * sentry-sdk: 1.26.0\r\n * Cython: 0.29.35\r\n * misaka: 2.1.1\r\n * GitPython: 3.1.31\r\n * borgbackup: 1.2.4\r\n * pyparsing: 3.0.9\r\n * pyahocorasick: 2.0.0\r\n * python-redis-lock: 4.0.0\r\n * charset-normalizer: 3.1.0\r\n * Python: 3.11.4\r\n * Git: 2.30.2\r\n * psycopg2: 2.9.6\r\n * phply: 1.2.6\r\n * ruamel.yaml: 0.17.32\r\n * tesserocr: 2.6.0\r\n * boto3: 1.26.164\r\n * zeep: 4.2.1\r\n * aeidon: 1.12\r\n * iniparse: 0.5\r\n * mysqlclient: 2.2.0\r\n * Mercurial: 6.4.5\r\n * git-svn: 2.30.2\r\n * git-review: 2.3.1\r\n * Redis server: 6.2.12\r\n * PostgreSQL server: 13.10\r\n * Database backends: django.db.backends.postgresql\r\n * Cache backends: default:RedisCache, avatar:FileBasedCache\r\n * Email setup: django.core.mail.backends.smtp.EmailBackend: mailz.porsche.co.at\r\n * OS encoding: filesystem=utf-8, default=utf-8\r\n * Celery: redis://localhost:6379/1, redis://localhost:6379/1, regular\r\n * Platform: Linux 3.10.0-1160.90.1.el7.x86_64 (x86_64)\n\n### Weblate deploy checks\n\n```shell\nSystem check identified some issues:\r\n\r\nINFOS:\r\n?: (weblate.I021) Error collection is not set up, it is highly recommended for production use\r\n HINT: https://docs.weblate.org/en/latest/admin/install.html#collecting-errors\r\n\r\nSystem check identified 1 issue (1 silenced).\n```\n\n\n### Additional context\n\nIt seems that Microsoft translator treats \"sr\" as 
\"sr-Latn\".\r\n\r\nFor example:\r\n``` \r\nPOST https://api-eur.cognitive.microsofttranslator.com/translate?api-version=3.0&from=en&to=sr\r\nContent-Type: application/json\r\n\r\n[{\"Text\":\"Hello World!\"}]\r\n```\r\n\r\ngets the answer\r\n```\r\n[\r\n {\r\n \"translations\": [\r\n {\r\n \"text\": \"Zdravo svete!\",\r\n \"to\": \"sr-Latn\"\r\n }\r\n ]\r\n }\r\n]\r\n```\r\n\r\nI think this has to be added to the `language_map`: https://github.com/WeblateOrg/weblate/blob/5674acc39e21ea092c0d2fba89569b802315595a/weblate/machinery/microsoft.py#L26\n", "before_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import annotations\n\nfrom datetime import timedelta\n\nfrom django.conf import settings\nfrom django.utils import timezone\n\nfrom .base import MachineTranslation, MachineTranslationError\nfrom .forms import MicrosoftMachineryForm\n\nTOKEN_URL = \"https://{0}{1}/sts/v1.0/issueToken?Subscription-Key={2}\"\nTOKEN_EXPIRY = timedelta(minutes=9)\n\n\nclass MicrosoftCognitiveTranslation(MachineTranslation):\n \"\"\"Microsoft Cognitive Services Translator API support.\"\"\"\n\n name = \"Microsoft Translator\"\n max_score = 90\n settings_form = MicrosoftMachineryForm\n\n language_map = {\n \"zh-hant\": \"zh-Hant\",\n \"zh-hans\": \"zh-Hans\",\n \"zh-tw\": \"zh-Hant\",\n \"zh-cn\": \"zh-Hans\",\n \"tlh\": \"tlh-Latn\",\n \"tlh-qaak\": \"tlh-Piqd\",\n \"nb\": \"no\",\n \"bs-latn\": \"bs-Latn\",\n \"sr-latn\": \"sr-Latn\",\n \"sr-cyrl\": \"sr-Cyrl\",\n \"mn\": \"mn-Mong\",\n }\n\n def __init__(self, settings: dict[str, str]):\n \"\"\"Check configuration.\"\"\"\n super().__init__(settings)\n self._access_token = None\n self._token_expiry = None\n\n # check settings for Microsoft region prefix\n region = \"\" if not self.settings[\"region\"] else f\"{self.settings['region']}.\"\n\n self._cognitive_token_url = TOKEN_URL.format(\n region,\n self.settings[\"endpoint_url\"],\n self.settings[\"key\"],\n )\n\n @staticmethod\n def migrate_settings():\n return {\n \"region\": settings.MT_MICROSOFT_REGION,\n \"endpoint_url\": settings.MT_MICROSOFT_ENDPOINT_URL,\n \"base_url\": settings.MT_MICROSOFT_BASE_URL,\n \"key\": settings.MT_MICROSOFT_COGNITIVE_KEY,\n }\n\n def get_url(self, suffix):\n return f\"https://{self.settings['base_url']}/{suffix}\"\n\n def is_token_expired(self):\n \"\"\"Check whether token is about to expire.\"\"\"\n return self._token_expiry <= timezone.now()\n\n def get_authentication(self):\n \"\"\"Hook for backends to allow add authentication headers to request.\"\"\"\n return {\"Authorization\": f\"Bearer {self.access_token}\"}\n\n @property\n def access_token(self):\n \"\"\"Obtain and caches access token.\"\"\"\n if self._access_token is None or self.is_token_expired():\n self._access_token = self.request(\n \"post\", self._cognitive_token_url, skip_auth=True\n ).text\n self._token_expiry = timezone.now() + TOKEN_EXPIRY\n\n return self._access_token\n\n def map_language_code(self, code):\n \"\"\"Convert language to service specific code.\"\"\"\n return super().map_language_code(code).replace(\"_\", \"-\")\n\n def download_languages(self):\n \"\"\"\n Download list of supported languages from a service.\n\n Example of the response:\n\n ['af', 'ar', 'bs-Latn', 'bg', 'ca', 'zh-CHS', 'zh-CHT', 'yue', 'hr', 'cs', 'da',\n 'nl', 'en', 'et', 'fj', 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww',\n 'h', 'id', 'it', 'ja', 'sw', 'tlh', 'tlh-Qaak', 'ko', 'lv', 'lt', 'mg', 'ms',\n 'mt', 
'yua', 'no', 'otq', 'fa', 'pl', 'pt', 'ro', 'r', 'sm', 'sr-Cyrl',\n 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'th', 'to', 'tr', 'uk', 'ur', 'vi',\n 'cy']\n \"\"\"\n response = self.request(\n \"get\", self.get_url(\"languages\"), params={\"api-version\": \"3.0\"}\n )\n # Microsoft tends to use utf-8-sig instead of plain utf-8\n response.encoding = response.apparent_encoding\n payload = response.json()\n\n # We should get an object, string usually means an error\n if isinstance(payload, str):\n raise MachineTranslationError(payload)\n\n return payload[\"translation\"].keys()\n\n def download_translations(\n self,\n source,\n language,\n text: str,\n unit,\n user,\n threshold: int = 75,\n ):\n \"\"\"Download list of possible translations from a service.\"\"\"\n args = {\n \"api-version\": \"3.0\",\n \"from\": source,\n \"to\": language,\n \"category\": \"general\",\n }\n response = self.request(\n \"post\", self.get_url(\"translate\"), params=args, json=[{\"Text\": text[:5000]}]\n )\n # Microsoft tends to use utf-8-sig instead of plain utf-8\n response.encoding = \"utf-8-sig\"\n payload = response.json()\n yield {\n \"text\": payload[0][\"translations\"][0][\"text\"],\n \"quality\": self.max_score,\n \"service\": self.name,\n \"source\": text,\n }\n", "path": "weblate/machinery/microsoft.py"}], "after_files": [{"content": "# Copyright \u00a9 Michal \u010ciha\u0159 <[email protected]>\n#\n# SPDX-License-Identifier: GPL-3.0-or-later\n\nfrom __future__ import annotations\n\nfrom datetime import timedelta\n\nfrom django.conf import settings\nfrom django.utils import timezone\n\nfrom .base import MachineTranslation, MachineTranslationError\nfrom .forms import MicrosoftMachineryForm\n\nTOKEN_URL = \"https://{0}{1}/sts/v1.0/issueToken?Subscription-Key={2}\"\nTOKEN_EXPIRY = timedelta(minutes=9)\n\n\nclass MicrosoftCognitiveTranslation(MachineTranslation):\n \"\"\"Microsoft Cognitive Services Translator API support.\"\"\"\n\n name = \"Microsoft Translator\"\n max_score = 90\n settings_form = MicrosoftMachineryForm\n\n language_map = {\n \"zh-hant\": \"zh-Hant\",\n \"zh-hans\": \"zh-Hans\",\n \"zh-tw\": \"zh-Hant\",\n \"zh-cn\": \"zh-Hans\",\n \"tlh\": \"tlh-Latn\",\n \"tlh-qaak\": \"tlh-Piqd\",\n \"nb\": \"no\",\n \"bs-latn\": \"bs-Latn\",\n \"sr\": \"sr-Latn\",\n \"sr-latn\": \"sr-Latn\",\n \"sr-cyrl\": \"sr-Cyrl\",\n \"mn\": \"mn-Mong\",\n }\n\n def __init__(self, settings: dict[str, str]):\n \"\"\"Check configuration.\"\"\"\n super().__init__(settings)\n self._access_token = None\n self._token_expiry = None\n\n # check settings for Microsoft region prefix\n region = \"\" if not self.settings[\"region\"] else f\"{self.settings['region']}.\"\n\n self._cognitive_token_url = TOKEN_URL.format(\n region,\n self.settings[\"endpoint_url\"],\n self.settings[\"key\"],\n )\n\n @staticmethod\n def migrate_settings():\n return {\n \"region\": settings.MT_MICROSOFT_REGION,\n \"endpoint_url\": settings.MT_MICROSOFT_ENDPOINT_URL,\n \"base_url\": settings.MT_MICROSOFT_BASE_URL,\n \"key\": settings.MT_MICROSOFT_COGNITIVE_KEY,\n }\n\n def get_url(self, suffix):\n return f\"https://{self.settings['base_url']}/{suffix}\"\n\n def is_token_expired(self):\n \"\"\"Check whether token is about to expire.\"\"\"\n return self._token_expiry <= timezone.now()\n\n def get_authentication(self):\n \"\"\"Hook for backends to allow add authentication headers to request.\"\"\"\n return {\"Authorization\": f\"Bearer {self.access_token}\"}\n\n @property\n def access_token(self):\n \"\"\"Obtain and caches access token.\"\"\"\n if 
self._access_token is None or self.is_token_expired():\n self._access_token = self.request(\n \"post\", self._cognitive_token_url, skip_auth=True\n ).text\n self._token_expiry = timezone.now() + TOKEN_EXPIRY\n\n return self._access_token\n\n def map_language_code(self, code):\n \"\"\"Convert language to service specific code.\"\"\"\n return super().map_language_code(code).replace(\"_\", \"-\")\n\n def download_languages(self):\n \"\"\"\n Download list of supported languages from a service.\n\n Example of the response:\n\n ['af', 'ar', 'bs-Latn', 'bg', 'ca', 'zh-CHS', 'zh-CHT', 'yue', 'hr', 'cs', 'da',\n 'nl', 'en', 'et', 'fj', 'fil', 'fi', 'fr', 'de', 'el', 'ht', 'he', 'hi', 'mww',\n 'h', 'id', 'it', 'ja', 'sw', 'tlh', 'tlh-Qaak', 'ko', 'lv', 'lt', 'mg', 'ms',\n 'mt', 'yua', 'no', 'otq', 'fa', 'pl', 'pt', 'ro', 'r', 'sm', 'sr-Cyrl',\n 'sr-Latn', 'sk', 'sl', 'es', 'sv', 'ty', 'th', 'to', 'tr', 'uk', 'ur', 'vi',\n 'cy']\n \"\"\"\n response = self.request(\n \"get\", self.get_url(\"languages\"), params={\"api-version\": \"3.0\"}\n )\n # Microsoft tends to use utf-8-sig instead of plain utf-8\n response.encoding = response.apparent_encoding\n payload = response.json()\n\n # We should get an object, string usually means an error\n if isinstance(payload, str):\n raise MachineTranslationError(payload)\n\n return payload[\"translation\"].keys()\n\n def download_translations(\n self,\n source,\n language,\n text: str,\n unit,\n user,\n threshold: int = 75,\n ):\n \"\"\"Download list of possible translations from a service.\"\"\"\n args = {\n \"api-version\": \"3.0\",\n \"from\": source,\n \"to\": language,\n \"category\": \"general\",\n }\n response = self.request(\n \"post\", self.get_url(\"translate\"), params=args, json=[{\"Text\": text[:5000]}]\n )\n # Microsoft tends to use utf-8-sig instead of plain utf-8\n response.encoding = \"utf-8-sig\"\n payload = response.json()\n yield {\n \"text\": payload[0][\"translations\"][0][\"text\"],\n \"quality\": self.max_score,\n \"service\": self.name,\n \"source\": text,\n }\n", "path": "weblate/machinery/microsoft.py"}]} | 3,073 | 135 |
gh_patches_debug_27689 | rasdani/github-patches | git_diff | python-discord__site-1007 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add some more common abbreviations to the rule keyword command list
I believe it would be beneficial to add two more shorthands for invoking the 'Rules' embed: "hw" and "eng". "hw" is a common shorthand for "homework", so it should be associated with the embed for rule 8. Likewise, "eng" is a common abbreviation for "English", so it can be linked with rule 4.
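
For illustration, a minimal sketch of the two affected keyword tuples from `pydis_site/apps/api/views.py` (the rule texts are copied from that file; `rules_excerpt` is just a name for this excerpt, and the `"eng"`/`"hw"` entries are the proposed additions):

```python
# Sketch: rule 4 (English) and rule 8 (homework) keyword tuples with the proposed abbreviations
rules_excerpt = [
    (
        "Use English to the best of your ability. "
        "Be polite if someone speaks English imperfectly.",
        ["english", "eng", "language"],  # "eng" added
    ),
    (
        "Do not help with ongoing exams. When helping with homework, "
        "help people learn how to do the assignment without doing it for them.",
        ["exam", "exams", "assignment", "assignments", "homework", "hw"],  # "hw" added
    ),
]
```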
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pydis_site/apps/api/views.py`
Content:
```
1 from rest_framework.exceptions import ParseError
2 from rest_framework.request import Request
3 from rest_framework.response import Response
4 from rest_framework.views import APIView
5
6 from . import github_utils
7
8
9 class HealthcheckView(APIView):
10 """
11 Provides a simple view to check that the website is alive and well.
12
13 ## Routes
14 ### GET /healthcheck
15 Returns a simple JSON document showcasing whether the system is working:
16
17 >>> {
18 ... 'status': 'ok'
19 ... }
20
21 Seems to be.
22
23 ## Authentication
24 Does not require any authentication nor permissions.
25 """
26
27 authentication_classes = ()
28 permission_classes = ()
29
30 def get(self, request, format=None): # noqa: D102,ANN001,ANN201
31 return Response({'status': 'ok'})
32
33
34 class RulesView(APIView):
35 """
36 Return a list of the server's rules.
37
38 ## Routes
39 ### GET /rules
40 Returns a JSON array containing the server's rules
41 and keywords relating to each rule.
42 Example response:
43
44 >>> [
45 ... ["Eat candy.", ["candy", "sweets"]],
46 ... ["Wake up at 4 AM.", ["wake_up", "early", "early_bird"]],
47 ... ["Take your medicine.", ["medicine", "health"]]
48 ... ]
49
50 Since some of the the rules require links, this view
51 gives you the option to return rules in either Markdown
52 or HTML format by specifying the `link_format` query parameter
53 as either `md` or `html`. Specifying a different value than
54 `md` or `html` will return 400.
55
56 ## Authentication
57 Does not require any authentication nor permissions.
58 """
59
60 authentication_classes = ()
61 permission_classes = ()
62
63 @staticmethod
64 def _format_link(description: str, link: str, target: str) -> str:
65 """
66 Build the markup for rendering the link.
67
68 This will render `link` with `description` as its description in the given
69 `target` language.
70
71 Arguments:
72 description (str):
73 A textual description of the string. Represents the content
74 between the `<a>` tags in HTML, or the content between the
75 array brackets in Markdown.
76
77 link (str):
78 The resulting link that a user should be redirected to
79 upon clicking the generated element.
80
81 target (str):
82 One of `{'md', 'html'}`, denoting the target format that the
83 link should be rendered in.
84
85 Returns:
86 str:
87 The link, rendered appropriately for the given `target` format
88 using `description` as its textual description.
89
90 Raises:
91 ValueError:
92 If `target` is not `'md'` or `'html'`.
93 """
94 if target == 'html':
95 return f'<a href="{link}">{description}</a>'
96 elif target == 'md': # noqa: RET505
97 return f'[{description}]({link})'
98 else:
99 raise ValueError(
100 f"Can only template links to `html` or `md`, got `{target}`"
101 )
102
103 # `format` here is the result format, we have a link format here instead.
104 def get(self, request, format=None): # noqa: ANN001, ANN201
105 """
106 Returns a list of our community rules coupled with their keywords.
107
108 Each item in the returned list is a tuple with the rule as first item
109 and a list of keywords that match that rules as second item.
110 """
111 link_format = request.query_params.get('link_format', 'md')
112 if link_format not in ('html', 'md'):
113 raise ParseError(
114 f"`format` must be `html` or `md`, got `{format}`."
115 )
116
117 discord_community_guidelines = self._format_link(
118 'Discord Community Guidelines',
119 'https://discordapp.com/guidelines',
120 link_format
121 )
122 discord_tos = self._format_link(
123 'Terms of Service',
124 'https://discordapp.com/terms',
125 link_format
126 )
127 pydis_coc = self._format_link(
128 'Python Discord Code of Conduct',
129 'https://pythondiscord.com/pages/code-of-conduct/',
130 link_format
131 )
132
133 return Response([
134 (
135 f"Follow the {pydis_coc}.",
136 ["coc", "conduct", "code"]
137 ),
138 (
139 f"Follow the {discord_community_guidelines} and {discord_tos}.",
140 ["discord", "guidelines", "discord_tos"]
141 ),
142 (
143 "Respect staff members and listen to their instructions.",
144 ["respect", "staff", "instructions"]
145 ),
146 (
147 "Use English to the best of your ability. "
148 "Be polite if someone speaks English imperfectly.",
149 ["english", "language"]
150 ),
151 (
152 "Do not provide or request help on projects that may violate terms of service, "
153 "or that may be deemed inappropriate, malicious, or illegal.",
154 ["infraction", "tos", "breach", "malicious", "inappropriate", "illegal"]
155 ),
156 (
157 "Do not post unapproved advertising.",
158 ["ad", "ads", "advert", "advertising"]
159 ),
160 (
161 "Keep discussions relevant to the channel topic. "
162 "Each channel's description tells you the topic.",
163 ["off-topic", "topic", "relevance"]
164 ),
165 (
166 "Do not help with ongoing exams. When helping with homework, "
167 "help people learn how to do the assignment without doing it for them.",
168 ["exam", "exams", "assignment", "assignments", "homework"]
169 ),
170 (
171 "Do not offer or ask for paid work of any kind.",
172 ["paid", "work", "money"]
173 ),
174 (
175 "Do not copy and paste answers from ChatGPT or similar AI tools.",
176 ["gpt", "chatgpt", "gpt3", "ai"]
177 ),
178 ])
179
180
181 class GitHubArtifactsView(APIView):
182 """
183 Provides utilities for interacting with the GitHub API and obtaining action artifacts.
184
185 ## Routes
186 ### GET /github/artifacts
187 Returns a download URL for the artifact requested.
188
189 {
190 'url': 'https://pipelines.actions.githubusercontent.com/...'
191 }
192
193 ### Exceptions
194 In case of an error, the following body will be returned:
195
196 {
197 "error_type": "<error class name>",
198 "error": "<error description>",
199 "requested_resource": "<owner>/<repo>/<sha>/<artifact_name>"
200 }
201
202 ## Authentication
203 Does not require any authentication nor permissions.
204 """
205
206 authentication_classes = ()
207 permission_classes = ()
208
209 def get(
210 self,
211 request: Request,
212 *,
213 owner: str,
214 repo: str,
215 sha: str,
216 action_name: str,
217 artifact_name: str
218 ) -> Response:
219 """Return a download URL for the requested artifact."""
220 try:
221 url = github_utils.get_artifact(owner, repo, sha, action_name, artifact_name)
222 return Response({"url": url})
223 except github_utils.ArtifactProcessingError as e:
224 return Response({
225 "error_type": e.__class__.__name__,
226 "error": str(e),
227 "requested_resource": f"{owner}/{repo}/{sha}/{action_name}/{artifact_name}"
228 }, status=e.status)
229
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pydis_site/apps/api/views.py b/pydis_site/apps/api/views.py
--- a/pydis_site/apps/api/views.py
+++ b/pydis_site/apps/api/views.py
@@ -146,7 +146,7 @@
(
"Use English to the best of your ability. "
"Be polite if someone speaks English imperfectly.",
- ["english", "language"]
+ ["english", "eng", "language"]
),
(
"Do not provide or request help on projects that may violate terms of service, "
@@ -165,15 +165,15 @@
(
"Do not help with ongoing exams. When helping with homework, "
"help people learn how to do the assignment without doing it for them.",
- ["exam", "exams", "assignment", "assignments", "homework"]
+ ["exam", "exams", "assignment", "assignments", "homework", "hw"]
),
(
"Do not offer or ask for paid work of any kind.",
["paid", "work", "money"]
),
(
- "Do not copy and paste answers from ChatGPT or similar AI tools.",
- ["gpt", "chatgpt", "gpt3", "ai"]
+ "Do not copy and paste answers from ChatGPT or similar AI tools.",
+ ["gpt", "chatgpt", "gpt3", "ai"]
),
])
| {"golden_diff": "diff --git a/pydis_site/apps/api/views.py b/pydis_site/apps/api/views.py\n--- a/pydis_site/apps/api/views.py\n+++ b/pydis_site/apps/api/views.py\n@@ -146,7 +146,7 @@\n (\n \"Use English to the best of your ability. \"\n \"Be polite if someone speaks English imperfectly.\",\n- [\"english\", \"language\"]\n+ [\"english\", \"eng\", \"language\"]\n ),\n (\n \"Do not provide or request help on projects that may violate terms of service, \"\n@@ -165,15 +165,15 @@\n (\n \"Do not help with ongoing exams. When helping with homework, \"\n \"help people learn how to do the assignment without doing it for them.\",\n- [\"exam\", \"exams\", \"assignment\", \"assignments\", \"homework\"]\n+ [\"exam\", \"exams\", \"assignment\", \"assignments\", \"homework\", \"hw\"]\n ),\n (\n \"Do not offer or ask for paid work of any kind.\",\n [\"paid\", \"work\", \"money\"]\n ),\n (\n- \"Do not copy and paste answers from ChatGPT or similar AI tools.\",\n- [\"gpt\", \"chatgpt\", \"gpt3\", \"ai\"]\n+ \"Do not copy and paste answers from ChatGPT or similar AI tools.\",\n+ [\"gpt\", \"chatgpt\", \"gpt3\", \"ai\"]\n ),\n ])\n", "issue": "Add some more common abbreviations to the rule keyword command list\nI believe it would be beneficial to add two more shorthands for invoking the 'Rules' embed: \"hw\" and \"eng\". \"hw\" is a common shorthand for \"homework\", so it should be associated with the embed for rule 8. Likewise, \"eng\" is a common abbreviation for \"English\", so it can be linked with rule 4.\n", "before_files": [{"content": "from rest_framework.exceptions import ParseError\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom . import github_utils\n\n\nclass HealthcheckView(APIView):\n \"\"\"\n Provides a simple view to check that the website is alive and well.\n\n ## Routes\n ### GET /healthcheck\n Returns a simple JSON document showcasing whether the system is working:\n\n >>> {\n ... 'status': 'ok'\n ... }\n\n Seems to be.\n\n ## Authentication\n Does not require any authentication nor permissions.\n \"\"\"\n\n authentication_classes = ()\n permission_classes = ()\n\n def get(self, request, format=None): # noqa: D102,ANN001,ANN201\n return Response({'status': 'ok'})\n\n\nclass RulesView(APIView):\n \"\"\"\n Return a list of the server's rules.\n\n ## Routes\n ### GET /rules\n Returns a JSON array containing the server's rules\n and keywords relating to each rule.\n Example response:\n\n >>> [\n ... [\"Eat candy.\", [\"candy\", \"sweets\"]],\n ... [\"Wake up at 4 AM.\", [\"wake_up\", \"early\", \"early_bird\"]],\n ... [\"Take your medicine.\", [\"medicine\", \"health\"]]\n ... ]\n\n Since some of the the rules require links, this view\n gives you the option to return rules in either Markdown\n or HTML format by specifying the `link_format` query parameter\n as either `md` or `html`. Specifying a different value than\n `md` or `html` will return 400.\n\n ## Authentication\n Does not require any authentication nor permissions.\n \"\"\"\n\n authentication_classes = ()\n permission_classes = ()\n\n @staticmethod\n def _format_link(description: str, link: str, target: str) -> str:\n \"\"\"\n Build the markup for rendering the link.\n\n This will render `link` with `description` as its description in the given\n `target` language.\n\n Arguments:\n description (str):\n A textual description of the string. 
Represents the content\n between the `<a>` tags in HTML, or the content between the\n array brackets in Markdown.\n\n link (str):\n The resulting link that a user should be redirected to\n upon clicking the generated element.\n\n target (str):\n One of `{'md', 'html'}`, denoting the target format that the\n link should be rendered in.\n\n Returns:\n str:\n The link, rendered appropriately for the given `target` format\n using `description` as its textual description.\n\n Raises:\n ValueError:\n If `target` is not `'md'` or `'html'`.\n \"\"\"\n if target == 'html':\n return f'<a href=\"{link}\">{description}</a>'\n elif target == 'md': # noqa: RET505\n return f'[{description}]({link})'\n else:\n raise ValueError(\n f\"Can only template links to `html` or `md`, got `{target}`\"\n )\n\n # `format` here is the result format, we have a link format here instead.\n def get(self, request, format=None): # noqa: ANN001, ANN201\n \"\"\"\n Returns a list of our community rules coupled with their keywords.\n\n Each item in the returned list is a tuple with the rule as first item\n and a list of keywords that match that rules as second item.\n \"\"\"\n link_format = request.query_params.get('link_format', 'md')\n if link_format not in ('html', 'md'):\n raise ParseError(\n f\"`format` must be `html` or `md`, got `{format}`.\"\n )\n\n discord_community_guidelines = self._format_link(\n 'Discord Community Guidelines',\n 'https://discordapp.com/guidelines',\n link_format\n )\n discord_tos = self._format_link(\n 'Terms of Service',\n 'https://discordapp.com/terms',\n link_format\n )\n pydis_coc = self._format_link(\n 'Python Discord Code of Conduct',\n 'https://pythondiscord.com/pages/code-of-conduct/',\n link_format\n )\n\n return Response([\n (\n f\"Follow the {pydis_coc}.\",\n [\"coc\", \"conduct\", \"code\"]\n ),\n (\n f\"Follow the {discord_community_guidelines} and {discord_tos}.\",\n [\"discord\", \"guidelines\", \"discord_tos\"]\n ),\n (\n \"Respect staff members and listen to their instructions.\",\n [\"respect\", \"staff\", \"instructions\"]\n ),\n (\n \"Use English to the best of your ability. \"\n \"Be polite if someone speaks English imperfectly.\",\n [\"english\", \"language\"]\n ),\n (\n \"Do not provide or request help on projects that may violate terms of service, \"\n \"or that may be deemed inappropriate, malicious, or illegal.\",\n [\"infraction\", \"tos\", \"breach\", \"malicious\", \"inappropriate\", \"illegal\"]\n ),\n (\n \"Do not post unapproved advertising.\",\n [\"ad\", \"ads\", \"advert\", \"advertising\"]\n ),\n (\n \"Keep discussions relevant to the channel topic. \"\n \"Each channel's description tells you the topic.\",\n [\"off-topic\", \"topic\", \"relevance\"]\n ),\n (\n \"Do not help with ongoing exams. 
When helping with homework, \"\n \"help people learn how to do the assignment without doing it for them.\",\n [\"exam\", \"exams\", \"assignment\", \"assignments\", \"homework\"]\n ),\n (\n \"Do not offer or ask for paid work of any kind.\",\n [\"paid\", \"work\", \"money\"]\n ),\n (\n \"Do not copy and paste answers from ChatGPT or similar AI tools.\",\n [\"gpt\", \"chatgpt\", \"gpt3\", \"ai\"]\n ),\n ])\n\n\nclass GitHubArtifactsView(APIView):\n \"\"\"\n Provides utilities for interacting with the GitHub API and obtaining action artifacts.\n\n ## Routes\n ### GET /github/artifacts\n Returns a download URL for the artifact requested.\n\n {\n 'url': 'https://pipelines.actions.githubusercontent.com/...'\n }\n\n ### Exceptions\n In case of an error, the following body will be returned:\n\n {\n \"error_type\": \"<error class name>\",\n \"error\": \"<error description>\",\n \"requested_resource\": \"<owner>/<repo>/<sha>/<artifact_name>\"\n }\n\n ## Authentication\n Does not require any authentication nor permissions.\n \"\"\"\n\n authentication_classes = ()\n permission_classes = ()\n\n def get(\n self,\n request: Request,\n *,\n owner: str,\n repo: str,\n sha: str,\n action_name: str,\n artifact_name: str\n ) -> Response:\n \"\"\"Return a download URL for the requested artifact.\"\"\"\n try:\n url = github_utils.get_artifact(owner, repo, sha, action_name, artifact_name)\n return Response({\"url\": url})\n except github_utils.ArtifactProcessingError as e:\n return Response({\n \"error_type\": e.__class__.__name__,\n \"error\": str(e),\n \"requested_resource\": f\"{owner}/{repo}/{sha}/{action_name}/{artifact_name}\"\n }, status=e.status)\n", "path": "pydis_site/apps/api/views.py"}], "after_files": [{"content": "from rest_framework.exceptions import ParseError\nfrom rest_framework.request import Request\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\n\nfrom . import github_utils\n\n\nclass HealthcheckView(APIView):\n \"\"\"\n Provides a simple view to check that the website is alive and well.\n\n ## Routes\n ### GET /healthcheck\n Returns a simple JSON document showcasing whether the system is working:\n\n >>> {\n ... 'status': 'ok'\n ... }\n\n Seems to be.\n\n ## Authentication\n Does not require any authentication nor permissions.\n \"\"\"\n\n authentication_classes = ()\n permission_classes = ()\n\n def get(self, request, format=None): # noqa: D102,ANN001,ANN201\n return Response({'status': 'ok'})\n\n\nclass RulesView(APIView):\n \"\"\"\n Return a list of the server's rules.\n\n ## Routes\n ### GET /rules\n Returns a JSON array containing the server's rules\n and keywords relating to each rule.\n Example response:\n\n >>> [\n ... [\"Eat candy.\", [\"candy\", \"sweets\"]],\n ... [\"Wake up at 4 AM.\", [\"wake_up\", \"early\", \"early_bird\"]],\n ... [\"Take your medicine.\", [\"medicine\", \"health\"]]\n ... ]\n\n Since some of the the rules require links, this view\n gives you the option to return rules in either Markdown\n or HTML format by specifying the `link_format` query parameter\n as either `md` or `html`. 
Specifying a different value than\n `md` or `html` will return 400.\n\n ## Authentication\n Does not require any authentication nor permissions.\n \"\"\"\n\n authentication_classes = ()\n permission_classes = ()\n\n @staticmethod\n def _format_link(description: str, link: str, target: str) -> str:\n \"\"\"\n Build the markup for rendering the link.\n\n This will render `link` with `description` as its description in the given\n `target` language.\n\n Arguments:\n description (str):\n A textual description of the string. Represents the content\n between the `<a>` tags in HTML, or the content between the\n array brackets in Markdown.\n\n link (str):\n The resulting link that a user should be redirected to\n upon clicking the generated element.\n\n target (str):\n One of `{'md', 'html'}`, denoting the target format that the\n link should be rendered in.\n\n Returns:\n str:\n The link, rendered appropriately for the given `target` format\n using `description` as its textual description.\n\n Raises:\n ValueError:\n If `target` is not `'md'` or `'html'`.\n \"\"\"\n if target == 'html':\n return f'<a href=\"{link}\">{description}</a>'\n elif target == 'md': # noqa: RET505\n return f'[{description}]({link})'\n else:\n raise ValueError(\n f\"Can only template links to `html` or `md`, got `{target}`\"\n )\n\n # `format` here is the result format, we have a link format here instead.\n def get(self, request, format=None): # noqa: ANN001, ANN201\n \"\"\"\n Returns a list of our community rules coupled with their keywords.\n\n Each item in the returned list is a tuple with the rule as first item\n and a list of keywords that match that rules as second item.\n \"\"\"\n link_format = request.query_params.get('link_format', 'md')\n if link_format not in ('html', 'md'):\n raise ParseError(\n f\"`format` must be `html` or `md`, got `{format}`.\"\n )\n\n discord_community_guidelines = self._format_link(\n 'Discord Community Guidelines',\n 'https://discordapp.com/guidelines',\n link_format\n )\n discord_tos = self._format_link(\n 'Terms of Service',\n 'https://discordapp.com/terms',\n link_format\n )\n pydis_coc = self._format_link(\n 'Python Discord Code of Conduct',\n 'https://pythondiscord.com/pages/code-of-conduct/',\n link_format\n )\n\n return Response([\n (\n f\"Follow the {pydis_coc}.\",\n [\"coc\", \"conduct\", \"code\"]\n ),\n (\n f\"Follow the {discord_community_guidelines} and {discord_tos}.\",\n [\"discord\", \"guidelines\", \"discord_tos\"]\n ),\n (\n \"Respect staff members and listen to their instructions.\",\n [\"respect\", \"staff\", \"instructions\"]\n ),\n (\n \"Use English to the best of your ability. \"\n \"Be polite if someone speaks English imperfectly.\",\n [\"english\", \"eng\", \"language\"]\n ),\n (\n \"Do not provide or request help on projects that may violate terms of service, \"\n \"or that may be deemed inappropriate, malicious, or illegal.\",\n [\"infraction\", \"tos\", \"breach\", \"malicious\", \"inappropriate\", \"illegal\"]\n ),\n (\n \"Do not post unapproved advertising.\",\n [\"ad\", \"ads\", \"advert\", \"advertising\"]\n ),\n (\n \"Keep discussions relevant to the channel topic. \"\n \"Each channel's description tells you the topic.\",\n [\"off-topic\", \"topic\", \"relevance\"]\n ),\n (\n \"Do not help with ongoing exams. 
When helping with homework, \"\n \"help people learn how to do the assignment without doing it for them.\",\n [\"exam\", \"exams\", \"assignment\", \"assignments\", \"homework\", \"hw\"]\n ),\n (\n \"Do not offer or ask for paid work of any kind.\",\n [\"paid\", \"work\", \"money\"]\n ),\n (\n \"Do not copy and paste answers from ChatGPT or similar AI tools.\",\n [\"gpt\", \"chatgpt\", \"gpt3\", \"ai\"]\n ),\n ])\n\n\nclass GitHubArtifactsView(APIView):\n \"\"\"\n Provides utilities for interacting with the GitHub API and obtaining action artifacts.\n\n ## Routes\n ### GET /github/artifacts\n Returns a download URL for the artifact requested.\n\n {\n 'url': 'https://pipelines.actions.githubusercontent.com/...'\n }\n\n ### Exceptions\n In case of an error, the following body will be returned:\n\n {\n \"error_type\": \"<error class name>\",\n \"error\": \"<error description>\",\n \"requested_resource\": \"<owner>/<repo>/<sha>/<artifact_name>\"\n }\n\n ## Authentication\n Does not require any authentication nor permissions.\n \"\"\"\n\n authentication_classes = ()\n permission_classes = ()\n\n def get(\n self,\n request: Request,\n *,\n owner: str,\n repo: str,\n sha: str,\n action_name: str,\n artifact_name: str\n ) -> Response:\n \"\"\"Return a download URL for the requested artifact.\"\"\"\n try:\n url = github_utils.get_artifact(owner, repo, sha, action_name, artifact_name)\n return Response({\"url\": url})\n except github_utils.ArtifactProcessingError as e:\n return Response({\n \"error_type\": e.__class__.__name__,\n \"error\": str(e),\n \"requested_resource\": f\"{owner}/{repo}/{sha}/{action_name}/{artifact_name}\"\n }, status=e.status)\n", "path": "pydis_site/apps/api/views.py"}]} | 2,536 | 320 |
gh_patches_debug_39336 | rasdani/github-patches | git_diff | chainer__chainer-4347 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
2nd order derivative of ELU should not give NaN
The 2nd-order derivative of ELU gives off NaN about once in hundreds of iterations. The entire network is then instantly contaminated with NaN.
I tracked down the cause to the following code in backward() of class ELUGrad in chainer/chainer/functions/activation/elu.py:
```
if 1 in indexes:
ret.append(ggxgx / gy)
```
It is natural that this division will give NaN if some element of gy is zero. Zero will occur when the single-precision floating point subtraction underflows.
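
For reference, a tiny self-contained illustration of the failure mode (not part of the original report): once an element of gy underflows to exactly zero, the element-wise division produces NaN.

```python
import numpy as np

gy = np.array([0.0, 1e-30], dtype=np.float32)     # first element underflowed to zero
ggxgx = np.array([0.0, 1e-30], dtype=np.float32)  # ggx * gx is also zero there
with np.errstate(invalid="ignore"):
    print(ggxgx / gy)  # -> [nan  1.], and the NaN then spreads through the network
```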
For your information, I am circumventing the issue by making a minor modification to elu.py, as in the attached file, which calculates the derivative in the same way as mathematical functions like F.exp do.
[elu.py.txt](https://github.com/chainer/chainer/files/1683829/elu.py.txt)
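
A minimal numpy sketch of the idea behind that modification (an illustration, not the attached file itself): compute the local derivative from x alone and multiply it by gy afterwards, so the second-order pass never has to divide by gy.

```python
import numpy as np

def elu_grad(x, alpha=1.0):
    """d ELU(x)/dx computed from x alone: 1 for x >= 0, alpha * exp(x) otherwise."""
    gx = np.ones_like(x)
    neg = x < 0
    gx[neg] = alpha * np.exp(x[neg])
    return gx

# The first-order backward then returns gy * elu_grad(x); its derivative with
# respect to gy is simply elu_grad(x), so no division by gy appears anywhere.
```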
How to reproduce:
I am using Chainer 3.2.0, but that part of the ELU source code is unchanged in v4.0, so I think this issue persists across versions.
```
>>> import chainer
>>> import numpy as np
>>> x = chainer.Variable(np.array([[0, 0]],dtype=np.float32))
>>> y = chainer.functions.elu(x)
>>> y
variable([[ 0., 0.]])
>>>
>>> y.grad = (np.array([[0, 1e-30]],dtype=np.float32))
>>> y.backward(enable_double_backprop=True)
>>>
>>> x.grad_var.grad = np.array([[1, 1]],dtype=np.float32)
>>> x.grad_var.backward()
/home/mogami/.pyenv/versions/anaconda3-4.2.0/lib/python3.5/site-packages/chainer/functions/math/basic_math.py:322: RuntimeWarning: invalid value encountered in true_divide
return utils.force_array(x[0] / x[1]),
>>> y.grad_var
variable([[ 0.00000000e+00, 1.00000000e-30]])
>>> y.grad_var.grad
array([[ nan, 1.]], dtype=float32)
```
The first element is nan, though it should be 1.0 in this case. This example may seem silly when considering ELU alone, but having zeros in some of the elements often happens when dy is back-propagated from somewhere else, because of underflow.
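(For reference, a quick sanity check added here, not part of the original report: with x = 0 the forward uses the x >= 0 branch, so x.grad equals gy and its derivative with respect to gy is 1 element-wise, i.e. y.grad_var.grad should be [1, 1]. The buggy path instead evaluates (ggx * gx) / gy = (ggx * gy) / gy, which is 0/0 = NaN wherever gy is exactly zero.)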
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chainer/functions/activation/elu.py`
Content:
```
1 import numpy
2
3 from chainer.backends import cuda
4 from chainer import function_node
5 from chainer.utils import type_check
6
7
8 class ELU(function_node.FunctionNode):
9
10 """Exponential Linear Unit."""
11
12 def __init__(self, alpha=1.0):
13 self.alpha = float(alpha)
14
15 def check_type_forward(self, in_types):
16 type_check.expect(in_types.size() == 1)
17 x_type, = in_types
18
19 type_check.expect(x_type.dtype.kind == 'f')
20
21 def forward_cpu(self, x):
22 self.retain_inputs((0,))
23 y = x[0].copy()
24 neg_indices = x[0] < 0
25 y[neg_indices] = self.alpha * (numpy.exp(y[neg_indices]) - 1)
26 return y,
27
28 def forward_gpu(self, x):
29 self.retain_inputs((0,))
30 y = cuda.elementwise(
31 'T x, T alpha', 'T y',
32 'y = x >= 0 ? x : (T)(alpha * (exp(x) - 1))',
33 'elu_fwd')(
34 x[0], self.alpha)
35 return y,
36
37 def backward(self, indexes, grad_outputs):
38 x, = self.get_retained_inputs()
39 gy, = grad_outputs
40 return ELUGrad(self.alpha).apply((x, gy))
41
42
43 class ELUGrad(function_node.FunctionNode):
44
45 """Exponential Linear Unit gradient function."""
46
47 def __init__(self, alpha):
48 self.alpha = alpha
49
50 def check_type_forward(self, in_types):
51 type_check.expect(in_types.size() == 2)
52 type_check.expect(in_types[0].dtype.kind == 'f')
53 type_check.expect(in_types[1].dtype.kind == 'f')
54
55 def forward_cpu(self, inputs):
56 x, gy = inputs
57 gx = gy.copy()
58 neg_indices = x < 0
59 gx[neg_indices] *= self.alpha * numpy.exp(x[neg_indices])
60 self.retain_inputs((0, 1))
61 self.retain_outputs((0,))
62 return gx,
63
64 def forward_gpu(self, inputs):
65 x, gy = inputs
66 gx = cuda.elementwise(
67 'T x, T gy, T alpha', 'T gx',
68 'gx = x >= 0 ? gy : (T)(gy * alpha * exp(x))',
69 'elu_bwd')(
70 x, gy, self.alpha)
71 self.retain_inputs((0, 1))
72 self.retain_outputs((0,))
73 return gx,
74
75 def backward(self, indexes, grad_outputs):
76 x, gy = self.get_retained_inputs()
77 gx, = self.get_retained_outputs()
78 ggx, = grad_outputs
79 ggxgx = ggx * gx
80
81 ret = []
82 if 0 in indexes:
83 ret.append(ggxgx * (x.data < 0))
84 if 1 in indexes:
85 ret.append(ggxgx / gy)
86
87 return ret
88
89
90 def elu(x, alpha=1.0):
91 """Exponential Linear Unit function.
92
93 For a parameter :math:`\\alpha`, it is expressed as
94
95 .. math::
96 f(x) = \\left \\{ \\begin{array}{ll}
97 x & {\\rm if}~ x \\ge 0 \\\\
98 \\alpha (\\exp(x) - 1) & {\\rm if}~ x < 0,
99 \\end{array} \\right.
100
101 See: https://arxiv.org/abs/1511.07289
102
103 Args:
104 x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
105 :class:`cupy.ndarray`):
106 Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.
107 alpha (float): Parameter :math:`\\alpha`. Default is 1.0.
108
109 Returns:
110 ~chainer.Variable: Output variable. A
111 :math:`(s_1, s_2, ..., s_N)`-shaped float array.
112
113 .. admonition:: Example
114
115 >>> x = np.array([[-1, 0], [2, -3]], 'f')
116 >>> x
117 array([[-1., 0.],
118 [ 2., -3.]], dtype=float32)
119 >>> y = F.elu(x, alpha=1.)
120 >>> y.data
121 array([[-0.63212055, 0. ],
122 [ 2. , -0.95021296]], dtype=float32)
123
124 """
125 return ELU(alpha=alpha).apply((x,))[0]
126
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chainer/functions/activation/elu.py b/chainer/functions/activation/elu.py
--- a/chainer/functions/activation/elu.py
+++ b/chainer/functions/activation/elu.py
@@ -22,14 +22,14 @@
self.retain_inputs((0,))
y = x[0].copy()
neg_indices = x[0] < 0
- y[neg_indices] = self.alpha * (numpy.exp(y[neg_indices]) - 1)
+ y[neg_indices] = self.alpha * (numpy.expm1(y[neg_indices]))
return y,
def forward_gpu(self, x):
self.retain_inputs((0,))
y = cuda.elementwise(
'T x, T alpha', 'T y',
- 'y = x >= 0 ? x : (T)(alpha * (exp(x) - 1))',
+ 'y = x >= 0 ? x : (T)(alpha * expm1(x))',
'elu_fwd')(
x[0], self.alpha)
return y,
@@ -37,7 +37,7 @@
def backward(self, indexes, grad_outputs):
x, = self.get_retained_inputs()
gy, = grad_outputs
- return ELUGrad(self.alpha).apply((x, gy))
+ return ELUGrad(self.alpha).apply((x,))[0] * gy,
class ELUGrad(function_node.FunctionNode):
@@ -48,43 +48,34 @@
self.alpha = alpha
def check_type_forward(self, in_types):
- type_check.expect(in_types.size() == 2)
+ type_check.expect(in_types.size() == 1)
type_check.expect(in_types[0].dtype.kind == 'f')
- type_check.expect(in_types[1].dtype.kind == 'f')
def forward_cpu(self, inputs):
- x, gy = inputs
- gx = gy.copy()
+ x, = inputs
+ gx = numpy.ones_like(x)
neg_indices = x < 0
gx[neg_indices] *= self.alpha * numpy.exp(x[neg_indices])
- self.retain_inputs((0, 1))
+ self.retain_inputs((0,))
self.retain_outputs((0,))
return gx,
def forward_gpu(self, inputs):
- x, gy = inputs
+ x, = inputs
gx = cuda.elementwise(
- 'T x, T gy, T alpha', 'T gx',
- 'gx = x >= 0 ? gy : (T)(gy * alpha * exp(x))',
+ 'T x, T alpha', 'T gx',
+ 'gx = x >= 0 ? (T)1 : (T)(alpha * exp(x))',
'elu_bwd')(
- x, gy, self.alpha)
- self.retain_inputs((0, 1))
+ x, self.alpha)
+ self.retain_inputs((0,))
self.retain_outputs((0,))
return gx,
def backward(self, indexes, grad_outputs):
- x, gy = self.get_retained_inputs()
+ x, = self.get_retained_inputs()
gx, = self.get_retained_outputs()
ggx, = grad_outputs
- ggxgx = ggx * gx
-
- ret = []
- if 0 in indexes:
- ret.append(ggxgx * (x.data < 0))
- if 1 in indexes:
- ret.append(ggxgx / gy)
-
- return ret
+ return ggx * gx * (x.data < 0),
def elu(x, alpha=1.0):
| {"golden_diff": "diff --git a/chainer/functions/activation/elu.py b/chainer/functions/activation/elu.py\n--- a/chainer/functions/activation/elu.py\n+++ b/chainer/functions/activation/elu.py\n@@ -22,14 +22,14 @@\n self.retain_inputs((0,))\n y = x[0].copy()\n neg_indices = x[0] < 0\n- y[neg_indices] = self.alpha * (numpy.exp(y[neg_indices]) - 1)\n+ y[neg_indices] = self.alpha * (numpy.expm1(y[neg_indices]))\n return y,\n \n def forward_gpu(self, x):\n self.retain_inputs((0,))\n y = cuda.elementwise(\n 'T x, T alpha', 'T y',\n- 'y = x >= 0 ? x : (T)(alpha * (exp(x) - 1))',\n+ 'y = x >= 0 ? x : (T)(alpha * expm1(x))',\n 'elu_fwd')(\n x[0], self.alpha)\n return y,\n@@ -37,7 +37,7 @@\n def backward(self, indexes, grad_outputs):\n x, = self.get_retained_inputs()\n gy, = grad_outputs\n- return ELUGrad(self.alpha).apply((x, gy))\n+ return ELUGrad(self.alpha).apply((x,))[0] * gy,\n \n \n class ELUGrad(function_node.FunctionNode):\n@@ -48,43 +48,34 @@\n self.alpha = alpha\n \n def check_type_forward(self, in_types):\n- type_check.expect(in_types.size() == 2)\n+ type_check.expect(in_types.size() == 1)\n type_check.expect(in_types[0].dtype.kind == 'f')\n- type_check.expect(in_types[1].dtype.kind == 'f')\n \n def forward_cpu(self, inputs):\n- x, gy = inputs\n- gx = gy.copy()\n+ x, = inputs\n+ gx = numpy.ones_like(x)\n neg_indices = x < 0\n gx[neg_indices] *= self.alpha * numpy.exp(x[neg_indices])\n- self.retain_inputs((0, 1))\n+ self.retain_inputs((0,))\n self.retain_outputs((0,))\n return gx,\n \n def forward_gpu(self, inputs):\n- x, gy = inputs\n+ x, = inputs\n gx = cuda.elementwise(\n- 'T x, T gy, T alpha', 'T gx',\n- 'gx = x >= 0 ? gy : (T)(gy * alpha * exp(x))',\n+ 'T x, T alpha', 'T gx',\n+ 'gx = x >= 0 ? (T)1 : (T)(alpha * exp(x))',\n 'elu_bwd')(\n- x, gy, self.alpha)\n- self.retain_inputs((0, 1))\n+ x, self.alpha)\n+ self.retain_inputs((0,))\n self.retain_outputs((0,))\n return gx,\n \n def backward(self, indexes, grad_outputs):\n- x, gy = self.get_retained_inputs()\n+ x, = self.get_retained_inputs()\n gx, = self.get_retained_outputs()\n ggx, = grad_outputs\n- ggxgx = ggx * gx\n-\n- ret = []\n- if 0 in indexes:\n- ret.append(ggxgx * (x.data < 0))\n- if 1 in indexes:\n- ret.append(ggxgx / gy)\n-\n- return ret\n+ return ggx * gx * (x.data < 0),\n \n \n def elu(x, alpha=1.0):\n", "issue": "2nd order derivative of ELU should not give NaN\nThe 2nd order derivative of ELU gives off NaN about once out of hundreds of iterations. Then the entire network will instantly contaminated with NaN.\r\n\r\nI tracked the cause and the following code in backward() of class ELUGrad in chainer/chainer/functions/activation/elu.py is the cause\r\n```\r\n if 1 in indexes:\r\n ret.append(ggxgx / gy)\r\n```\r\n\r\nIt is natural that this division will give NaN if some element of gy is zero. 
Zero will occur when the single-precision floating point subtraction underflows.\r\n\r\nFor your information, I am circumventing the issue by making minor modification to elu.py as in the attached file, which uses the same way to calculate derivative as the mathematical functions like F.exp.\r\n[elu.py.txt](https://github.com/chainer/chainer/files/1683829/elu.py.txt)\r\n\r\n\r\nHow to reproduce:\r\nI am using Chainer 3.2.0, but that part of the ELU source code is not different in v4.0, therefore I think this issue persists over the versions.\r\n```\r\n>>> import chainer\r\n>>> import numpy as np\r\n>>> x = chainer.Variable(np.array([[0, 0]],dtype=np.float32))\r\n>>> y = chainer.functions.elu(x)\r\n>>> y\r\nvariable([[ 0., 0.]])\r\n>>> \r\n>>> y.grad = (np.array([[0, 1e-30]],dtype=np.float32))\r\n>>> y.backward(enable_double_backprop=True)\r\n>>> \r\n>>> x.grad_var.grad = np.array([[1, 1]],dtype=np.float32)\r\n>>> x.grad_var.backward()\r\n/home/mogami/.pyenv/versions/anaconda3-4.2.0/lib/python3.5/site-packages/chainer/functions/math/basic_math.py:322: RuntimeWarning: invalid value encountered in true_divide\r\n return utils.force_array(x[0] / x[1]),\r\n>>> y.grad_var\r\nvariable([[ 0.00000000e+00, 1.00000000e-30]])\r\n>>> y.grad_var.grad\r\narray([[ nan, 1.]], dtype=float32)\r\n```\r\nThe first element is nan, though it should be 1.0 in this case. This example may seem silly when considering ELU only, but having zero for some of elements often happens when dy is back propagated from somewhere else because of underflow.\n", "before_files": [{"content": "import numpy\n\nfrom chainer.backends import cuda\nfrom chainer import function_node\nfrom chainer.utils import type_check\n\n\nclass ELU(function_node.FunctionNode):\n\n \"\"\"Exponential Linear Unit.\"\"\"\n\n def __init__(self, alpha=1.0):\n self.alpha = float(alpha)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n\n type_check.expect(x_type.dtype.kind == 'f')\n\n def forward_cpu(self, x):\n self.retain_inputs((0,))\n y = x[0].copy()\n neg_indices = x[0] < 0\n y[neg_indices] = self.alpha * (numpy.exp(y[neg_indices]) - 1)\n return y,\n\n def forward_gpu(self, x):\n self.retain_inputs((0,))\n y = cuda.elementwise(\n 'T x, T alpha', 'T y',\n 'y = x >= 0 ? x : (T)(alpha * (exp(x) - 1))',\n 'elu_fwd')(\n x[0], self.alpha)\n return y,\n\n def backward(self, indexes, grad_outputs):\n x, = self.get_retained_inputs()\n gy, = grad_outputs\n return ELUGrad(self.alpha).apply((x, gy))\n\n\nclass ELUGrad(function_node.FunctionNode):\n\n \"\"\"Exponential Linear Unit gradient function.\"\"\"\n\n def __init__(self, alpha):\n self.alpha = alpha\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 2)\n type_check.expect(in_types[0].dtype.kind == 'f')\n type_check.expect(in_types[1].dtype.kind == 'f')\n\n def forward_cpu(self, inputs):\n x, gy = inputs\n gx = gy.copy()\n neg_indices = x < 0\n gx[neg_indices] *= self.alpha * numpy.exp(x[neg_indices])\n self.retain_inputs((0, 1))\n self.retain_outputs((0,))\n return gx,\n\n def forward_gpu(self, inputs):\n x, gy = inputs\n gx = cuda.elementwise(\n 'T x, T gy, T alpha', 'T gx',\n 'gx = x >= 0 ? 
gy : (T)(gy * alpha * exp(x))',\n 'elu_bwd')(\n x, gy, self.alpha)\n self.retain_inputs((0, 1))\n self.retain_outputs((0,))\n return gx,\n\n def backward(self, indexes, grad_outputs):\n x, gy = self.get_retained_inputs()\n gx, = self.get_retained_outputs()\n ggx, = grad_outputs\n ggxgx = ggx * gx\n\n ret = []\n if 0 in indexes:\n ret.append(ggxgx * (x.data < 0))\n if 1 in indexes:\n ret.append(ggxgx / gy)\n\n return ret\n\n\ndef elu(x, alpha=1.0):\n \"\"\"Exponential Linear Unit function.\n\n For a parameter :math:`\\\\alpha`, it is expressed as\n\n .. math::\n f(x) = \\\\left \\\\{ \\\\begin{array}{ll}\n x & {\\\\rm if}~ x \\\\ge 0 \\\\\\\\\n \\\\alpha (\\\\exp(x) - 1) & {\\\\rm if}~ x < 0,\n \\\\end{array} \\\\right.\n\n See: https://arxiv.org/abs/1511.07289\n\n Args:\n x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \\\n :class:`cupy.ndarray`):\n Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.\n alpha (float): Parameter :math:`\\\\alpha`. Default is 1.0.\n\n Returns:\n ~chainer.Variable: Output variable. A\n :math:`(s_1, s_2, ..., s_N)`-shaped float array.\n\n .. admonition:: Example\n\n >>> x = np.array([[-1, 0], [2, -3]], 'f')\n >>> x\n array([[-1., 0.],\n [ 2., -3.]], dtype=float32)\n >>> y = F.elu(x, alpha=1.)\n >>> y.data\n array([[-0.63212055, 0. ],\n [ 2. , -0.95021296]], dtype=float32)\n\n \"\"\"\n return ELU(alpha=alpha).apply((x,))[0]\n", "path": "chainer/functions/activation/elu.py"}], "after_files": [{"content": "import numpy\n\nfrom chainer.backends import cuda\nfrom chainer import function_node\nfrom chainer.utils import type_check\n\n\nclass ELU(function_node.FunctionNode):\n\n \"\"\"Exponential Linear Unit.\"\"\"\n\n def __init__(self, alpha=1.0):\n self.alpha = float(alpha)\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n x_type, = in_types\n\n type_check.expect(x_type.dtype.kind == 'f')\n\n def forward_cpu(self, x):\n self.retain_inputs((0,))\n y = x[0].copy()\n neg_indices = x[0] < 0\n y[neg_indices] = self.alpha * (numpy.expm1(y[neg_indices]))\n return y,\n\n def forward_gpu(self, x):\n self.retain_inputs((0,))\n y = cuda.elementwise(\n 'T x, T alpha', 'T y',\n 'y = x >= 0 ? x : (T)(alpha * expm1(x))',\n 'elu_fwd')(\n x[0], self.alpha)\n return y,\n\n def backward(self, indexes, grad_outputs):\n x, = self.get_retained_inputs()\n gy, = grad_outputs\n return ELUGrad(self.alpha).apply((x,))[0] * gy,\n\n\nclass ELUGrad(function_node.FunctionNode):\n\n \"\"\"Exponential Linear Unit gradient function.\"\"\"\n\n def __init__(self, alpha):\n self.alpha = alpha\n\n def check_type_forward(self, in_types):\n type_check.expect(in_types.size() == 1)\n type_check.expect(in_types[0].dtype.kind == 'f')\n\n def forward_cpu(self, inputs):\n x, = inputs\n gx = numpy.ones_like(x)\n neg_indices = x < 0\n gx[neg_indices] *= self.alpha * numpy.exp(x[neg_indices])\n self.retain_inputs((0,))\n self.retain_outputs((0,))\n return gx,\n\n def forward_gpu(self, inputs):\n x, = inputs\n gx = cuda.elementwise(\n 'T x, T alpha', 'T gx',\n 'gx = x >= 0 ? (T)1 : (T)(alpha * exp(x))',\n 'elu_bwd')(\n x, self.alpha)\n self.retain_inputs((0,))\n self.retain_outputs((0,))\n return gx,\n\n def backward(self, indexes, grad_outputs):\n x, = self.get_retained_inputs()\n gx, = self.get_retained_outputs()\n ggx, = grad_outputs\n return ggx * gx * (x.data < 0),\n\n\ndef elu(x, alpha=1.0):\n \"\"\"Exponential Linear Unit function.\n\n For a parameter :math:`\\\\alpha`, it is expressed as\n\n .. 
math::\n f(x) = \\\\left \\\\{ \\\\begin{array}{ll}\n x & {\\\\rm if}~ x \\\\ge 0 \\\\\\\\\n \\\\alpha (\\\\exp(x) - 1) & {\\\\rm if}~ x < 0,\n \\\\end{array} \\\\right.\n\n See: https://arxiv.org/abs/1511.07289\n\n Args:\n x (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \\\n :class:`cupy.ndarray`):\n Input variable. A :math:`(s_1, s_2, ..., s_N)`-shaped float array.\n alpha (float): Parameter :math:`\\\\alpha`. Default is 1.0.\n\n Returns:\n ~chainer.Variable: Output variable. A\n :math:`(s_1, s_2, ..., s_N)`-shaped float array.\n\n .. admonition:: Example\n\n >>> x = np.array([[-1, 0], [2, -3]], 'f')\n >>> x\n array([[-1., 0.],\n [ 2., -3.]], dtype=float32)\n >>> y = F.elu(x, alpha=1.)\n >>> y.data\n array([[-0.63212055, 0. ],\n [ 2. , -0.95021296]], dtype=float32)\n\n \"\"\"\n return ELU(alpha=alpha).apply((x,))[0]\n", "path": "chainer/functions/activation/elu.py"}]} | 2,105 | 813 |
gh_patches_debug_32691 | rasdani/github-patches | git_diff | mindsdb__lightwood-619 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`Img2VecEncoder` calls `self.prepare` twice
## Your Environment
* Python version: Python 3.8.10
* Operating system: Ubuntu 20.04.3 LTS
* Lightwood version: 1.3.0
## Describe your issue
`Img2VecEncoder` crashes when `.encode(images)` is called.
Internally there seems to be some confusion about what the `.prepare` method actually does.
For some reason it is called to convert images to tensors, while in reality it should be used to initialize the model and (maybe) perform some initial training.
## Fixing the issue
Implement a method to convert images to torch tensors.
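A minimal sketch of such a conversion helper, reusing the transforms the encoder already defines (the 224x224 size and ImageNet normalization statistics come from the file below; the helper name is illustrative):
```python
from PIL import Image
import torchvision.transforms as transforms

# Per-image pipeline: resize, convert to tensor, normalize.
# Resize is the non-deprecated replacement for transforms.Scale.
_img_to_tensor = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
])

def images_to_tensors(image_paths):
    """Turn a list of image file paths into normalized torch tensors."""
    return [_img_to_tensor(Image.open(path)) for path in image_paths]
```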
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `lightwood/encoder/image/img_2_vec.py`
Content:
```
1 import logging
2 import torch
3 import torchvision.transforms as transforms
4 from lightwood.encoder.image.helpers.img_to_vec import Img2Vec
5 from lightwood.encoder.base import BaseEncoder
6
7
8 class Img2VecEncoder(BaseEncoder):
9
10 def __init__(self, is_target: bool = False):
11 super().__init__(is_target)
12 self.model = None
13 # I think we should make this an enum, something like: speed, balance, accuracy
14 self.aim = aim
15 self._prepared = False
16
17 self._scaler = transforms.Scale((224, 224))
18 self._normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
19 self._to_tensor = transforms.ToTensor()
20
21 pil_logger = logging.getLogger('PIL')
22 pil_logger.setLevel(logging.ERROR)
23
24 def prepare(self, priming_data):
25 if self._prepared:
26 raise Exception('You can only call "prepare" once for a given encoder.')
27
28 if self.model is None:
29 self.model = Img2Vec(model='resnext-50-small')
30 self._prepared = True
31
32 def encode(self, images):
33 """
34 Encode list of images
35
36 :images : list of images, each image is a path to a file or a url
37 :return: a torch.floatTensor
38 """
39 if not self._prepared:
40 raise Exception('You need to call "prepare" before calling "encode" or "decode".')
41
42 img_tensors = self.prepare(images)
43 vec_arr = []
44 self.model.eval()
45 with torch.no_grad():
46 for img_tensor in img_tensors:
47 vec = self.model(img_tensor.unsqueeze(0), batch=False)
48 vec_arr.append(vec)
49 return torch.stack(vec_arr)
50
51 def decode(self, encoded_values_tensor):
52 raise Exception('This encoder is not bi-directional')
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/lightwood/encoder/image/img_2_vec.py b/lightwood/encoder/image/img_2_vec.py
--- a/lightwood/encoder/image/img_2_vec.py
+++ b/lightwood/encoder/image/img_2_vec.py
@@ -3,6 +3,7 @@
import torchvision.transforms as transforms
from lightwood.encoder.image.helpers.img_to_vec import Img2Vec
from lightwood.encoder.base import BaseEncoder
+from PIL import Image
class Img2VecEncoder(BaseEncoder):
@@ -10,13 +11,18 @@
def __init__(self, is_target: bool = False):
super().__init__(is_target)
self.model = None
- # I think we should make this an enum, something like: speed, balance, accuracy
- self.aim = aim
+ # # I think we should make this an enum, something like: speed, balance, accuracy
+ # self.aim = aim
self._prepared = False
- self._scaler = transforms.Scale((224, 224))
+ self._scaler = transforms.Resize((224, 224))
self._normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
self._to_tensor = transforms.ToTensor()
+ self._img_to_tensor = transforms.Compose([
+ self._scaler,
+ self._to_tensor,
+ self._normalize
+ ])
pil_logger = logging.getLogger('PIL')
pil_logger.setLevel(logging.ERROR)
@@ -39,8 +45,11 @@
if not self._prepared:
raise Exception('You need to call "prepare" before calling "encode" or "decode".')
- img_tensors = self.prepare(images)
+ img_tensors = [self._img_to_tensor(
+ Image.open(img_path)
+ ) for img_path in images]
vec_arr = []
+
self.model.eval()
with torch.no_grad():
for img_tensor in img_tensors:
| {"golden_diff": "diff --git a/lightwood/encoder/image/img_2_vec.py b/lightwood/encoder/image/img_2_vec.py\n--- a/lightwood/encoder/image/img_2_vec.py\n+++ b/lightwood/encoder/image/img_2_vec.py\n@@ -3,6 +3,7 @@\n import torchvision.transforms as transforms\n from lightwood.encoder.image.helpers.img_to_vec import Img2Vec\n from lightwood.encoder.base import BaseEncoder\n+from PIL import Image\n \n \n class Img2VecEncoder(BaseEncoder):\n@@ -10,13 +11,18 @@\n def __init__(self, is_target: bool = False):\n super().__init__(is_target)\n self.model = None\n- # I think we should make this an enum, something like: speed, balance, accuracy\n- self.aim = aim\n+ # # I think we should make this an enum, something like: speed, balance, accuracy\n+ # self.aim = aim\n self._prepared = False\n \n- self._scaler = transforms.Scale((224, 224))\n+ self._scaler = transforms.Resize((224, 224))\n self._normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n self._to_tensor = transforms.ToTensor()\n+ self._img_to_tensor = transforms.Compose([\n+ self._scaler,\n+ self._to_tensor,\n+ self._normalize\n+ ])\n \n pil_logger = logging.getLogger('PIL')\n pil_logger.setLevel(logging.ERROR)\n@@ -39,8 +45,11 @@\n if not self._prepared:\n raise Exception('You need to call \"prepare\" before calling \"encode\" or \"decode\".')\n \n- img_tensors = self.prepare(images)\n+ img_tensors = [self._img_to_tensor(\n+ Image.open(img_path)\n+ ) for img_path in images]\n vec_arr = []\n+\n self.model.eval()\n with torch.no_grad():\n for img_tensor in img_tensors:\n", "issue": "`Img2VecEncoder` calls `self.prepare` twice\n## Your Environment\r\n* Python version: Python 3.8.10\r\n* Operating system: Ubuntu 20.04.3 LTS\r\n* Lightwood version: 1.3.0\r\n\r\n## Describe your issue\r\n`Img2VecEncoder` crashes when `.encode(images)` is called.\r\n\r\nInternally there seems to be some confusion on what the `.prepare` method actually does.\r\nFor some reason it is called to convert images to tensors, while in reality it should be used to initialize the model and (maybe) perform some initial training.\r\n\r\n## Fixing the issue\r\n\r\nImplement a method to convert images to torch tensors.\r\n\n", "before_files": [{"content": "import logging\nimport torch\nimport torchvision.transforms as transforms\nfrom lightwood.encoder.image.helpers.img_to_vec import Img2Vec\nfrom lightwood.encoder.base import BaseEncoder\n\n\nclass Img2VecEncoder(BaseEncoder):\n\n def __init__(self, is_target: bool = False):\n super().__init__(is_target)\n self.model = None\n # I think we should make this an enum, something like: speed, balance, accuracy\n self.aim = aim\n self._prepared = False\n\n self._scaler = transforms.Scale((224, 224))\n self._normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n self._to_tensor = transforms.ToTensor()\n\n pil_logger = logging.getLogger('PIL')\n pil_logger.setLevel(logging.ERROR)\n\n def prepare(self, priming_data):\n if self._prepared:\n raise Exception('You can only call \"prepare\" once for a given encoder.')\n\n if self.model is None:\n self.model = Img2Vec(model='resnext-50-small')\n self._prepared = True\n\n def encode(self, images):\n \"\"\"\n Encode list of images\n\n :images : list of images, each image is a path to a file or a url\n :return: a torch.floatTensor\n \"\"\"\n if not self._prepared:\n raise Exception('You need to call \"prepare\" before calling \"encode\" or \"decode\".')\n\n img_tensors = self.prepare(images)\n vec_arr = []\n 
self.model.eval()\n with torch.no_grad():\n for img_tensor in img_tensors:\n vec = self.model(img_tensor.unsqueeze(0), batch=False)\n vec_arr.append(vec)\n return torch.stack(vec_arr)\n\n def decode(self, encoded_values_tensor):\n raise Exception('This encoder is not bi-directional')\n", "path": "lightwood/encoder/image/img_2_vec.py"}], "after_files": [{"content": "import logging\nimport torch\nimport torchvision.transforms as transforms\nfrom lightwood.encoder.image.helpers.img_to_vec import Img2Vec\nfrom lightwood.encoder.base import BaseEncoder\nfrom PIL import Image\n\n\nclass Img2VecEncoder(BaseEncoder):\n\n def __init__(self, is_target: bool = False):\n super().__init__(is_target)\n self.model = None\n # # I think we should make this an enum, something like: speed, balance, accuracy\n # self.aim = aim\n self._prepared = False\n\n self._scaler = transforms.Resize((224, 224))\n self._normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n self._to_tensor = transforms.ToTensor()\n self._img_to_tensor = transforms.Compose([\n self._scaler,\n self._to_tensor,\n self._normalize\n ])\n\n pil_logger = logging.getLogger('PIL')\n pil_logger.setLevel(logging.ERROR)\n\n def prepare(self, priming_data):\n if self._prepared:\n raise Exception('You can only call \"prepare\" once for a given encoder.')\n\n if self.model is None:\n self.model = Img2Vec(model='resnext-50-small')\n self._prepared = True\n\n def encode(self, images):\n \"\"\"\n Encode list of images\n\n :images : list of images, each image is a path to a file or a url\n :return: a torch.floatTensor\n \"\"\"\n if not self._prepared:\n raise Exception('You need to call \"prepare\" before calling \"encode\" or \"decode\".')\n\n img_tensors = [self._img_to_tensor(\n Image.open(img_path)\n ) for img_path in images]\n vec_arr = []\n\n self.model.eval()\n with torch.no_grad():\n for img_tensor in img_tensors:\n vec = self.model(img_tensor.unsqueeze(0), batch=False)\n vec_arr.append(vec)\n return torch.stack(vec_arr)\n\n def decode(self, encoded_values_tensor):\n raise Exception('This encoder is not bi-directional')\n", "path": "lightwood/encoder/image/img_2_vec.py"}]} | 934 | 470 |
gh_patches_debug_33258 | rasdani/github-patches | git_diff | fidals__shopelectro-395 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
category_products.html:5: Implement pagination buttons...
The puzzle `302-bdb9bbef` from #302 has to be resolved:
https://github.com/fidals/shopelectro/blob/96e14747c3d7da9dd7db50f01bbb987147e4e2cb/templates/catalog/category_products.html#L5-L5
The puzzle was created by Artemiy on 08-Jun-18.
Estimate: 60 minutes,
If you have any technical questions, don't ask me, submit new tickets instead. The task will be "done" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html).
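For the view-side piece such buttons would rely on, a hedged sketch using Django's `Paginator` (the function name is illustrative; the template can then drive the buttons from the standard `Page` API):
```python
from django.core.paginator import Paginator, InvalidPage
from django.http import Http404

def get_paginated_page_or_404(objects, per_page, page_number):
    """Return a Page for valid requests, raise 404 for out-of-range or malformed page numbers."""
    try:
        return Paginator(objects, per_page).page(page_number)
    except InvalidPage:
        raise Http404('Page does not exist')

# Pagination buttons in category_products.html can then read:
# page.has_previous(), page.previous_page_number(),
# page.number, page.paginator.num_pages,
# page.has_next(), page.next_page_number()
```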
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `shopelectro/views/catalog.py`
Content:
```
1 from functools import partial
2
3 from django.conf import settings
4 from django.core.paginator import Paginator
5 from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
6 from django.shortcuts import render, get_object_or_404
7 from django.views.decorators.http import require_POST
8 from django_user_agents.utils import get_user_agent
9
10 from catalog.views import catalog
11 from images.models import Image
12 from pages import views as pages_views
13
14 from shopelectro import config
15 from shopelectro import models
16 from shopelectro.views.helpers import set_csrf_cookie
17
18 PRODUCTS_ON_PAGE_PC = 48
19 PRODUCTS_ON_PAGE_MOB = 12
20
21
22 def get_products_count(request):
23 """Calculate max products list size from request. List size depends on device type."""
24 mobile_view = get_user_agent(request).is_mobile
25 return PRODUCTS_ON_PAGE_MOB if mobile_view else PRODUCTS_ON_PAGE_PC
26
27
28 # CATALOG VIEWS
29 class CategoryTree(catalog.CategoryTree):
30 category_model = models.Category
31
32
33 @set_csrf_cookie
34 class ProductPage(catalog.ProductPage):
35 pk_url_kwarg = None
36 slug_url_kwarg = 'product_vendor_code'
37 slug_field = 'vendor_code'
38
39 queryset = (
40 models.Product.objects
41 .filter(category__isnull=False)
42 .prefetch_related('product_feedbacks', 'page__images')
43 .select_related('page')
44 )
45
46 def get_context_data(self, **kwargs):
47 context = super(ProductPage, self).get_context_data(**kwargs)
48
49 group_tags_pairs = (
50 models.Tag.objects
51 .filter(products=self.object)
52 .get_group_tags_pairs()
53 )
54
55 return {
56 **context,
57 'price_bounds': config.PRICE_BOUNDS,
58 'group_tags_pairs': group_tags_pairs
59 }
60
61
62 # SHOPELECTRO-SPECIFIC VIEWS
63 @set_csrf_cookie
64 class IndexPage(pages_views.CustomPageView):
65
66 def get_context_data(self, **kwargs):
67 """Extended method. Add product's images to context."""
68 context = super(IndexPage, self).get_context_data(**kwargs)
69 mobile_view = get_user_agent(self.request).is_mobile
70
71 top_products = (
72 models.Product.objects
73 .filter(id__in=settings.TOP_PRODUCTS)
74 .prefetch_related('category')
75 .select_related('page')
76 )
77
78 images = Image.objects.get_main_images_by_pages(
79 models.ProductPage.objects.filter(
80 shopelectro_product__in=top_products
81 )
82 )
83
84 categories = models.Category.objects.get_root_categories_by_products(
85 top_products)
86
87 prepared_top_products = []
88 if not mobile_view:
89 prepared_top_products = [
90 (product, images.get(product.page), categories.get(product))
91 for product in top_products
92 ]
93
94 return {
95 **context,
96 'category_tile': config.MAIN_PAGE_TILE,
97 'prepared_top_products': prepared_top_products,
98 }
99
100
101 def merge_products_and_images(products):
102 images = Image.objects.get_main_images_by_pages(
103 models.ProductPage.objects.filter(shopelectro_product__in=products)
104 )
105
106 return [
107 (product, images.get(product.page))
108 for product in products
109 ]
110
111
112 @set_csrf_cookie
113 class CategoryPage(catalog.CategoryPage):
114
115 def get_context_data(self, **kwargs):
116 """Add sorting options and view_types in context."""
117 context = super().get_context_data(**kwargs)
118 products_on_page = int(self.request.GET.get(
119 'step', get_products_count(self.request),
120 ))
121 page_number = int(self.request.GET.get('page', 1))
122 view_type = self.request.session.get('view_type', 'tile')
123 sorting = int(self.kwargs.get('sorting', 0))
124 sorting_option = config.category_sorting(sorting)
125 category = context['category']
126 if (
127 page_number < 1 or
128 products_on_page not in settings.CATEGORY_STEP_MULTIPLIERS
129 ):
130 raise Http404('Page does not exist.')
131
132 all_products = (
133 models.Product.objects
134 .prefetch_related('page__images')
135 .select_related('page')
136 .get_by_category(category, ordering=(sorting_option, ))
137 )
138
139 group_tags_pairs = (
140 models.Tag.objects
141 .filter(products__in=all_products)
142 .get_group_tags_pairs()
143 )
144
145 tags = self.kwargs.get('tags')
146
147 tag_titles = ''
148 if tags:
149 slugs = models.Tag.parse_url_tags(tags)
150 tags = models.Tag.objects.filter(slug__in=slugs)
151
152 all_products = (
153 all_products
154 .filter(tags__in=tags)
155 # Use distinct because filtering by QuerySet tags,
156 # that related with products by many-to-many relation.
157 .distinct(sorting_option.lstrip('-'))
158 )
159
160 tag_titles = models.serialize_tags_to_title(tags)
161
162 def template_context(page, tag_titles, tags):
163 return {
164 'page': page,
165 'tag_titles': tag_titles,
166 'tags': tags,
167 }
168
169 page = context['page']
170 page.get_template_render_context = partial(
171 template_context, page, tag_titles, tags)
172
173 paginated_page = Paginator(all_products, products_on_page).page(page_number)
174 total_products = all_products.count()
175 products = paginated_page.object_list
176 if not products:
177 raise Http404('Page without products does not exist.')
178
179 return {
180 **context,
181 'product_image_pairs': merge_products_and_images(products),
182 'group_tags_pairs': group_tags_pairs,
183 'total_products': total_products,
184 'products_count': (page_number - 1) * products_on_page + products.count(),
185 'paginated_page': paginated_page,
186 'sorting_options': config.category_sorting(),
187 'limits': settings.CATEGORY_STEP_MULTIPLIERS,
188 'sort': sorting,
189 'tags': tags,
190 'view_type': view_type,
191 'skip_canonical': bool(tags),
192 }
193
194
195 def load_more(request, category_slug, offset=0, limit=0, sorting=0, tags=None):
196 """
197 Load more products of a given category.
198
199 :param sorting: preferred sorting index from CATEGORY_SORTING tuple
200 :param request: HttpRequest object
201 :param category_slug: Slug for a given category
202 :param offset: used for slicing QuerySet.
203 :return: products list in html format
204 """
205 products_on_page = limit or get_products_count(request)
206 offset = int(offset)
207 if offset < 0:
208 return HttpResponseBadRequest(
209 'The offset is wrong. An offset should be greater than or equal to 0.'
210 )
211 if products_on_page not in settings.CATEGORY_STEP_MULTIPLIERS:
212 return HttpResponseBadRequest(
213 'The limit number is wrong. List of available numbers:'
214 f' {", ".join(map(str, settings.CATEGORY_STEP_MULTIPLIERS))}'
215 )
216 # increment page number because:
217 # 11 // 12 = 0, 0 // 12 = 0 but it should be the first page
218 # 12 // 12 = 1, 23 // 12 = 1, but it should be the second page
219 page_number = (offset // products_on_page) + 1
220 category = get_object_or_404(models.CategoryPage, slug=category_slug).model
221 sorting_option = config.category_sorting(int(sorting))
222
223 all_products = (
224 models.Product.objects
225 .prefetch_related('page__images')
226 .select_related('page')
227 .get_by_category(category, ordering=(sorting_option,))
228 )
229
230 if tags:
231 tag_entities = models.Tag.objects.filter(
232 slug__in=models.Tag.parse_url_tags(tags)
233 )
234
235 all_products = (
236 all_products
237 .filter(tags__in=tag_entities)
238 # Use distinct because filtering by QuerySet tags,
239 # that related with products by many-to-many relation.
240 .distinct(sorting_option.lstrip('-'))
241 )
242
243 paginated_page = Paginator(all_products, products_on_page).page(page_number)
244 products = paginated_page.object_list
245 view = request.session.get('view_type', 'tile')
246
247 return render(request, 'catalog/category_products.html', {
248 'product_image_pairs': merge_products_and_images(products),
249 'paginated_page': paginated_page,
250 'view_type': view,
251 'prods': products_on_page,
252 })
253
254
255 @require_POST
256 def save_feedback(request):
257 def get_keys_from_post(*args):
258 return {arg: request.POST.get(arg, '') for arg in args}
259
260 product_id = request.POST.get('id')
261 product = models.Product.objects.filter(id=product_id).first()
262 if not (product_id and product):
263 return HttpResponse(status=422)
264
265 fields = ['rating', 'name', 'dignities', 'limitations', 'general']
266 feedback_data = get_keys_from_post(*fields)
267
268 models.ProductFeedback.objects.create(product=product, **feedback_data)
269 return HttpResponse('ok')
270
271
272 @require_POST
273 def delete_feedback(request):
274 if not request.user.is_authenticated:
275 return HttpResponseForbidden('Not today, sly guy...')
276
277 feedback_id = request.POST.get('id')
278 feedback = models.ProductFeedback.objects.filter(id=feedback_id).first()
279 if not (feedback_id and feedback):
280 return HttpResponse(status=422)
281
282 feedback.delete()
283 return HttpResponse('Feedback with id={} was deleted.'.format(feedback_id))
284
285
286 class ProductsWithoutImages(catalog.ProductsWithoutImages):
287 model = models.Product
288
289
290 class ProductsWithoutText(catalog.ProductsWithoutText):
291 model = models.Product
292
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/shopelectro/views/catalog.py b/shopelectro/views/catalog.py
--- a/shopelectro/views/catalog.py
+++ b/shopelectro/views/catalog.py
@@ -1,7 +1,7 @@
from functools import partial
from django.conf import settings
-from django.core.paginator import Paginator
+from django.core.paginator import Paginator, InvalidPage
from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden
from django.shortcuts import render, get_object_or_404
from django.views.decorators.http import require_POST
@@ -25,6 +25,13 @@
return PRODUCTS_ON_PAGE_MOB if mobile_view else PRODUCTS_ON_PAGE_PC
+def get_paginated_page_or_404(objects, per_page, page_number):
+ try:
+ return Paginator(objects, per_page).page(page_number)
+ except InvalidPage:
+ raise Http404('Page does not exist')
+
+
# CATALOG VIEWS
class CategoryTree(catalog.CategoryTree):
category_model = models.Category
@@ -170,7 +177,7 @@
page.get_template_render_context = partial(
template_context, page, tag_titles, tags)
- paginated_page = Paginator(all_products, products_on_page).page(page_number)
+ paginated_page = get_paginated_page_or_404(all_products, products_on_page, page_number)
total_products = all_products.count()
products = paginated_page.object_list
if not products:
@@ -240,7 +247,7 @@
.distinct(sorting_option.lstrip('-'))
)
- paginated_page = Paginator(all_products, products_on_page).page(page_number)
+ paginated_page = get_paginated_page_or_404(all_products, products_on_page, page_number)
products = paginated_page.object_list
view = request.session.get('view_type', 'tile')
| {"golden_diff": "diff --git a/shopelectro/views/catalog.py b/shopelectro/views/catalog.py\n--- a/shopelectro/views/catalog.py\n+++ b/shopelectro/views/catalog.py\n@@ -1,7 +1,7 @@\n from functools import partial\n \n from django.conf import settings\n-from django.core.paginator import Paginator\n+from django.core.paginator import Paginator, InvalidPage\n from django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden\n from django.shortcuts import render, get_object_or_404\n from django.views.decorators.http import require_POST\n@@ -25,6 +25,13 @@\n return PRODUCTS_ON_PAGE_MOB if mobile_view else PRODUCTS_ON_PAGE_PC\n \n \n+def get_paginated_page_or_404(objects, per_page, page_number):\n+ try:\n+ return Paginator(objects, per_page).page(page_number)\n+ except InvalidPage:\n+ raise Http404('Page does not exist')\n+\n+\n # CATALOG VIEWS\n class CategoryTree(catalog.CategoryTree):\n category_model = models.Category\n@@ -170,7 +177,7 @@\n page.get_template_render_context = partial(\n template_context, page, tag_titles, tags)\n \n- paginated_page = Paginator(all_products, products_on_page).page(page_number)\n+ paginated_page = get_paginated_page_or_404(all_products, products_on_page, page_number)\n total_products = all_products.count()\n products = paginated_page.object_list\n if not products:\n@@ -240,7 +247,7 @@\n .distinct(sorting_option.lstrip('-'))\n )\n \n- paginated_page = Paginator(all_products, products_on_page).page(page_number)\n+ paginated_page = get_paginated_page_or_404(all_products, products_on_page, page_number)\n products = paginated_page.object_list\n view = request.session.get('view_type', 'tile')\n", "issue": "category_products.html:5: Implement pagination buttons...\nThe puzzle `302-bdb9bbef` from #302 has to be resolved:\n\nhttps://github.com/fidals/shopelectro/blob/96e14747c3d7da9dd7db50f01bbb987147e4e2cb/templates/catalog/category_products.html#L5-L5\n\nThe puzzle was created by Artemiy on 08-Jun-18. \n\nEstimate: 60 minutes, \n\nIf you have any technical questions, don't ask me, submit new tickets instead. The task will be \"done\" when the problem is fixed and the text of the puzzle is _removed_ from the source code. Here is more about [PDD](http://www.yegor256.com/2009/03/04/pdd.html) and [about me](http://www.yegor256.com/2017/04/05/pdd-in-action.html).\n", "before_files": [{"content": "from functools import partial\n\nfrom django.conf import settings\nfrom django.core.paginator import Paginator\nfrom django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.decorators.http import require_POST\nfrom django_user_agents.utils import get_user_agent\n\nfrom catalog.views import catalog\nfrom images.models import Image\nfrom pages import views as pages_views\n\nfrom shopelectro import config\nfrom shopelectro import models\nfrom shopelectro.views.helpers import set_csrf_cookie\n\nPRODUCTS_ON_PAGE_PC = 48\nPRODUCTS_ON_PAGE_MOB = 12\n\n\ndef get_products_count(request):\n \"\"\"Calculate max products list size from request. 
List size depends on device type.\"\"\"\n mobile_view = get_user_agent(request).is_mobile\n return PRODUCTS_ON_PAGE_MOB if mobile_view else PRODUCTS_ON_PAGE_PC\n\n\n# CATALOG VIEWS\nclass CategoryTree(catalog.CategoryTree):\n category_model = models.Category\n\n\n@set_csrf_cookie\nclass ProductPage(catalog.ProductPage):\n pk_url_kwarg = None\n slug_url_kwarg = 'product_vendor_code'\n slug_field = 'vendor_code'\n\n queryset = (\n models.Product.objects\n .filter(category__isnull=False)\n .prefetch_related('product_feedbacks', 'page__images')\n .select_related('page')\n )\n\n def get_context_data(self, **kwargs):\n context = super(ProductPage, self).get_context_data(**kwargs)\n\n group_tags_pairs = (\n models.Tag.objects\n .filter(products=self.object)\n .get_group_tags_pairs()\n )\n\n return {\n **context,\n 'price_bounds': config.PRICE_BOUNDS,\n 'group_tags_pairs': group_tags_pairs\n }\n\n\n# SHOPELECTRO-SPECIFIC VIEWS\n@set_csrf_cookie\nclass IndexPage(pages_views.CustomPageView):\n\n def get_context_data(self, **kwargs):\n \"\"\"Extended method. Add product's images to context.\"\"\"\n context = super(IndexPage, self).get_context_data(**kwargs)\n mobile_view = get_user_agent(self.request).is_mobile\n\n top_products = (\n models.Product.objects\n .filter(id__in=settings.TOP_PRODUCTS)\n .prefetch_related('category')\n .select_related('page')\n )\n\n images = Image.objects.get_main_images_by_pages(\n models.ProductPage.objects.filter(\n shopelectro_product__in=top_products\n )\n )\n\n categories = models.Category.objects.get_root_categories_by_products(\n top_products)\n\n prepared_top_products = []\n if not mobile_view:\n prepared_top_products = [\n (product, images.get(product.page), categories.get(product))\n for product in top_products\n ]\n\n return {\n **context,\n 'category_tile': config.MAIN_PAGE_TILE,\n 'prepared_top_products': prepared_top_products,\n }\n\n\ndef merge_products_and_images(products):\n images = Image.objects.get_main_images_by_pages(\n models.ProductPage.objects.filter(shopelectro_product__in=products)\n )\n\n return [\n (product, images.get(product.page))\n for product in products\n ]\n\n\n@set_csrf_cookie\nclass CategoryPage(catalog.CategoryPage):\n\n def get_context_data(self, **kwargs):\n \"\"\"Add sorting options and view_types in context.\"\"\"\n context = super().get_context_data(**kwargs)\n products_on_page = int(self.request.GET.get(\n 'step', get_products_count(self.request),\n ))\n page_number = int(self.request.GET.get('page', 1))\n view_type = self.request.session.get('view_type', 'tile')\n sorting = int(self.kwargs.get('sorting', 0))\n sorting_option = config.category_sorting(sorting)\n category = context['category']\n if (\n page_number < 1 or\n products_on_page not in settings.CATEGORY_STEP_MULTIPLIERS\n ):\n raise Http404('Page does not exist.')\n\n all_products = (\n models.Product.objects\n .prefetch_related('page__images')\n .select_related('page')\n .get_by_category(category, ordering=(sorting_option, ))\n )\n\n group_tags_pairs = (\n models.Tag.objects\n .filter(products__in=all_products)\n .get_group_tags_pairs()\n )\n\n tags = self.kwargs.get('tags')\n\n tag_titles = ''\n if tags:\n slugs = models.Tag.parse_url_tags(tags)\n tags = models.Tag.objects.filter(slug__in=slugs)\n\n all_products = (\n all_products\n .filter(tags__in=tags)\n # Use distinct because filtering by QuerySet tags,\n # that related with products by many-to-many relation.\n .distinct(sorting_option.lstrip('-'))\n )\n\n tag_titles = models.serialize_tags_to_title(tags)\n\n 
def template_context(page, tag_titles, tags):\n return {\n 'page': page,\n 'tag_titles': tag_titles,\n 'tags': tags,\n }\n\n page = context['page']\n page.get_template_render_context = partial(\n template_context, page, tag_titles, tags)\n\n paginated_page = Paginator(all_products, products_on_page).page(page_number)\n total_products = all_products.count()\n products = paginated_page.object_list\n if not products:\n raise Http404('Page without products does not exist.')\n\n return {\n **context,\n 'product_image_pairs': merge_products_and_images(products),\n 'group_tags_pairs': group_tags_pairs,\n 'total_products': total_products,\n 'products_count': (page_number - 1) * products_on_page + products.count(),\n 'paginated_page': paginated_page,\n 'sorting_options': config.category_sorting(),\n 'limits': settings.CATEGORY_STEP_MULTIPLIERS,\n 'sort': sorting,\n 'tags': tags,\n 'view_type': view_type,\n 'skip_canonical': bool(tags),\n }\n\n\ndef load_more(request, category_slug, offset=0, limit=0, sorting=0, tags=None):\n \"\"\"\n Load more products of a given category.\n\n :param sorting: preferred sorting index from CATEGORY_SORTING tuple\n :param request: HttpRequest object\n :param category_slug: Slug for a given category\n :param offset: used for slicing QuerySet.\n :return: products list in html format\n \"\"\"\n products_on_page = limit or get_products_count(request)\n offset = int(offset)\n if offset < 0:\n return HttpResponseBadRequest(\n 'The offset is wrong. An offset should be greater than or equal to 0.'\n )\n if products_on_page not in settings.CATEGORY_STEP_MULTIPLIERS:\n return HttpResponseBadRequest(\n 'The limit number is wrong. List of available numbers:'\n f' {\", \".join(map(str, settings.CATEGORY_STEP_MULTIPLIERS))}'\n )\n # increment page number because:\n # 11 // 12 = 0, 0 // 12 = 0 but it should be the first page\n # 12 // 12 = 1, 23 // 12 = 1, but it should be the second page\n page_number = (offset // products_on_page) + 1\n category = get_object_or_404(models.CategoryPage, slug=category_slug).model\n sorting_option = config.category_sorting(int(sorting))\n\n all_products = (\n models.Product.objects\n .prefetch_related('page__images')\n .select_related('page')\n .get_by_category(category, ordering=(sorting_option,))\n )\n\n if tags:\n tag_entities = models.Tag.objects.filter(\n slug__in=models.Tag.parse_url_tags(tags)\n )\n\n all_products = (\n all_products\n .filter(tags__in=tag_entities)\n # Use distinct because filtering by QuerySet tags,\n # that related with products by many-to-many relation.\n .distinct(sorting_option.lstrip('-'))\n )\n\n paginated_page = Paginator(all_products, products_on_page).page(page_number)\n products = paginated_page.object_list\n view = request.session.get('view_type', 'tile')\n\n return render(request, 'catalog/category_products.html', {\n 'product_image_pairs': merge_products_and_images(products),\n 'paginated_page': paginated_page,\n 'view_type': view,\n 'prods': products_on_page,\n })\n\n\n@require_POST\ndef save_feedback(request):\n def get_keys_from_post(*args):\n return {arg: request.POST.get(arg, '') for arg in args}\n\n product_id = request.POST.get('id')\n product = models.Product.objects.filter(id=product_id).first()\n if not (product_id and product):\n return HttpResponse(status=422)\n\n fields = ['rating', 'name', 'dignities', 'limitations', 'general']\n feedback_data = get_keys_from_post(*fields)\n\n models.ProductFeedback.objects.create(product=product, **feedback_data)\n return HttpResponse('ok')\n\n\n@require_POST\ndef 
delete_feedback(request):\n if not request.user.is_authenticated:\n return HttpResponseForbidden('Not today, sly guy...')\n\n feedback_id = request.POST.get('id')\n feedback = models.ProductFeedback.objects.filter(id=feedback_id).first()\n if not (feedback_id and feedback):\n return HttpResponse(status=422)\n\n feedback.delete()\n return HttpResponse('Feedback with id={} was deleted.'.format(feedback_id))\n\n\nclass ProductsWithoutImages(catalog.ProductsWithoutImages):\n model = models.Product\n\n\nclass ProductsWithoutText(catalog.ProductsWithoutText):\n model = models.Product\n", "path": "shopelectro/views/catalog.py"}], "after_files": [{"content": "from functools import partial\n\nfrom django.conf import settings\nfrom django.core.paginator import Paginator, InvalidPage\nfrom django.http import Http404, HttpResponse, HttpResponseBadRequest, HttpResponseForbidden\nfrom django.shortcuts import render, get_object_or_404\nfrom django.views.decorators.http import require_POST\nfrom django_user_agents.utils import get_user_agent\n\nfrom catalog.views import catalog\nfrom images.models import Image\nfrom pages import views as pages_views\n\nfrom shopelectro import config\nfrom shopelectro import models\nfrom shopelectro.views.helpers import set_csrf_cookie\n\nPRODUCTS_ON_PAGE_PC = 48\nPRODUCTS_ON_PAGE_MOB = 12\n\n\ndef get_products_count(request):\n \"\"\"Calculate max products list size from request. List size depends on device type.\"\"\"\n mobile_view = get_user_agent(request).is_mobile\n return PRODUCTS_ON_PAGE_MOB if mobile_view else PRODUCTS_ON_PAGE_PC\n\n\ndef get_paginated_page_or_404(objects, per_page, page_number):\n try:\n return Paginator(objects, per_page).page(page_number)\n except InvalidPage:\n raise Http404('Page does not exist')\n\n\n# CATALOG VIEWS\nclass CategoryTree(catalog.CategoryTree):\n category_model = models.Category\n\n\n@set_csrf_cookie\nclass ProductPage(catalog.ProductPage):\n pk_url_kwarg = None\n slug_url_kwarg = 'product_vendor_code'\n slug_field = 'vendor_code'\n\n queryset = (\n models.Product.objects\n .filter(category__isnull=False)\n .prefetch_related('product_feedbacks', 'page__images')\n .select_related('page')\n )\n\n def get_context_data(self, **kwargs):\n context = super(ProductPage, self).get_context_data(**kwargs)\n\n group_tags_pairs = (\n models.Tag.objects\n .filter(products=self.object)\n .get_group_tags_pairs()\n )\n\n return {\n **context,\n 'price_bounds': config.PRICE_BOUNDS,\n 'group_tags_pairs': group_tags_pairs\n }\n\n\n# SHOPELECTRO-SPECIFIC VIEWS\n@set_csrf_cookie\nclass IndexPage(pages_views.CustomPageView):\n\n def get_context_data(self, **kwargs):\n \"\"\"Extended method. 
Add product's images to context.\"\"\"\n context = super(IndexPage, self).get_context_data(**kwargs)\n mobile_view = get_user_agent(self.request).is_mobile\n\n top_products = (\n models.Product.objects\n .filter(id__in=settings.TOP_PRODUCTS)\n .prefetch_related('category')\n .select_related('page')\n )\n\n images = Image.objects.get_main_images_by_pages(\n models.ProductPage.objects.filter(\n shopelectro_product__in=top_products\n )\n )\n\n categories = models.Category.objects.get_root_categories_by_products(\n top_products)\n\n prepared_top_products = []\n if not mobile_view:\n prepared_top_products = [\n (product, images.get(product.page), categories.get(product))\n for product in top_products\n ]\n\n return {\n **context,\n 'category_tile': config.MAIN_PAGE_TILE,\n 'prepared_top_products': prepared_top_products,\n }\n\n\ndef merge_products_and_images(products):\n images = Image.objects.get_main_images_by_pages(\n models.ProductPage.objects.filter(shopelectro_product__in=products)\n )\n\n return [\n (product, images.get(product.page))\n for product in products\n ]\n\n\n@set_csrf_cookie\nclass CategoryPage(catalog.CategoryPage):\n\n def get_context_data(self, **kwargs):\n \"\"\"Add sorting options and view_types in context.\"\"\"\n context = super().get_context_data(**kwargs)\n products_on_page = int(self.request.GET.get(\n 'step', get_products_count(self.request),\n ))\n page_number = int(self.request.GET.get('page', 1))\n view_type = self.request.session.get('view_type', 'tile')\n sorting = int(self.kwargs.get('sorting', 0))\n sorting_option = config.category_sorting(sorting)\n category = context['category']\n if (\n page_number < 1 or\n products_on_page not in settings.CATEGORY_STEP_MULTIPLIERS\n ):\n raise Http404('Page does not exist.')\n\n all_products = (\n models.Product.objects\n .prefetch_related('page__images')\n .select_related('page')\n .get_by_category(category, ordering=(sorting_option, ))\n )\n\n group_tags_pairs = (\n models.Tag.objects\n .filter(products__in=all_products)\n .get_group_tags_pairs()\n )\n\n tags = self.kwargs.get('tags')\n\n tag_titles = ''\n if tags:\n slugs = models.Tag.parse_url_tags(tags)\n tags = models.Tag.objects.filter(slug__in=slugs)\n\n all_products = (\n all_products\n .filter(tags__in=tags)\n # Use distinct because filtering by QuerySet tags,\n # that related with products by many-to-many relation.\n .distinct(sorting_option.lstrip('-'))\n )\n\n tag_titles = models.serialize_tags_to_title(tags)\n\n def template_context(page, tag_titles, tags):\n return {\n 'page': page,\n 'tag_titles': tag_titles,\n 'tags': tags,\n }\n\n page = context['page']\n page.get_template_render_context = partial(\n template_context, page, tag_titles, tags)\n\n paginated_page = get_paginated_page_or_404(all_products, products_on_page, page_number)\n total_products = all_products.count()\n products = paginated_page.object_list\n if not products:\n raise Http404('Page without products does not exist.')\n\n return {\n **context,\n 'product_image_pairs': merge_products_and_images(products),\n 'group_tags_pairs': group_tags_pairs,\n 'total_products': total_products,\n 'products_count': (page_number - 1) * products_on_page + products.count(),\n 'paginated_page': paginated_page,\n 'sorting_options': config.category_sorting(),\n 'limits': settings.CATEGORY_STEP_MULTIPLIERS,\n 'sort': sorting,\n 'tags': tags,\n 'view_type': view_type,\n 'skip_canonical': bool(tags),\n }\n\n\ndef load_more(request, category_slug, offset=0, limit=0, sorting=0, tags=None):\n \"\"\"\n Load more 
products of a given category.\n\n :param sorting: preferred sorting index from CATEGORY_SORTING tuple\n :param request: HttpRequest object\n :param category_slug: Slug for a given category\n :param offset: used for slicing QuerySet.\n :return: products list in html format\n \"\"\"\n products_on_page = limit or get_products_count(request)\n offset = int(offset)\n if offset < 0:\n return HttpResponseBadRequest(\n 'The offset is wrong. An offset should be greater than or equal to 0.'\n )\n if products_on_page not in settings.CATEGORY_STEP_MULTIPLIERS:\n return HttpResponseBadRequest(\n 'The limit number is wrong. List of available numbers:'\n f' {\", \".join(map(str, settings.CATEGORY_STEP_MULTIPLIERS))}'\n )\n # increment page number because:\n # 11 // 12 = 0, 0 // 12 = 0 but it should be the first page\n # 12 // 12 = 1, 23 // 12 = 1, but it should be the second page\n page_number = (offset // products_on_page) + 1\n category = get_object_or_404(models.CategoryPage, slug=category_slug).model\n sorting_option = config.category_sorting(int(sorting))\n\n all_products = (\n models.Product.objects\n .prefetch_related('page__images')\n .select_related('page')\n .get_by_category(category, ordering=(sorting_option,))\n )\n\n if tags:\n tag_entities = models.Tag.objects.filter(\n slug__in=models.Tag.parse_url_tags(tags)\n )\n\n all_products = (\n all_products\n .filter(tags__in=tag_entities)\n # Use distinct because filtering by QuerySet tags,\n # that related with products by many-to-many relation.\n .distinct(sorting_option.lstrip('-'))\n )\n\n paginated_page = get_paginated_page_or_404(all_products, products_on_page, page_number)\n products = paginated_page.object_list\n view = request.session.get('view_type', 'tile')\n\n return render(request, 'catalog/category_products.html', {\n 'product_image_pairs': merge_products_and_images(products),\n 'paginated_page': paginated_page,\n 'view_type': view,\n 'prods': products_on_page,\n })\n\n\n@require_POST\ndef save_feedback(request):\n def get_keys_from_post(*args):\n return {arg: request.POST.get(arg, '') for arg in args}\n\n product_id = request.POST.get('id')\n product = models.Product.objects.filter(id=product_id).first()\n if not (product_id and product):\n return HttpResponse(status=422)\n\n fields = ['rating', 'name', 'dignities', 'limitations', 'general']\n feedback_data = get_keys_from_post(*fields)\n\n models.ProductFeedback.objects.create(product=product, **feedback_data)\n return HttpResponse('ok')\n\n\n@require_POST\ndef delete_feedback(request):\n if not request.user.is_authenticated:\n return HttpResponseForbidden('Not today, sly guy...')\n\n feedback_id = request.POST.get('id')\n feedback = models.ProductFeedback.objects.filter(id=feedback_id).first()\n if not (feedback_id and feedback):\n return HttpResponse(status=422)\n\n feedback.delete()\n return HttpResponse('Feedback with id={} was deleted.'.format(feedback_id))\n\n\nclass ProductsWithoutImages(catalog.ProductsWithoutImages):\n model = models.Product\n\n\nclass ProductsWithoutText(catalog.ProductsWithoutText):\n model = models.Product\n", "path": "shopelectro/views/catalog.py"}]} | 3,338 | 419 |
gh_patches_debug_7123 | rasdani/github-patches | git_diff | optuna__optuna-4133 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`None` categorical not visible on Slice plot
### Expected behavior
I have a categorical like this: `trial.suggest_categorical("class_weight", ["balanced", None])`
The slice plot shows the "balanced" value but not the `None` value.
I could write a workaround by using `"None"` as a string and then converting it back to `None`,
but I think it would be nice if the real `None` were plotted.
See screenshot:
<img width="234" alt="image" src="https://user-images.githubusercontent.com/229382/199188383-981f256d-0b66-4a1c-be40-68ecd6ae4528.png">
### Environment
- Optuna version: 3.0.3
- Python version: 3.9.13
- OS: Linux-5.10.0-17-amd64-x86_64-with-glibc2.31
### Error messages, stack traces, or logs
```shell
see screenshot
```
### Steps to reproduce
see description above
### Additional context (optional)
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optuna/visualization/_slice.py`
Content:
```
1 from typing import Any
2 from typing import Callable
3 from typing import cast
4 from typing import List
5 from typing import NamedTuple
6 from typing import Optional
7
8 from optuna.logging import get_logger
9 from optuna.study import Study
10 from optuna.trial import FrozenTrial
11 from optuna.trial import TrialState
12 from optuna.visualization._plotly_imports import _imports
13 from optuna.visualization._utils import _check_plot_args
14 from optuna.visualization._utils import _filter_nonfinite
15 from optuna.visualization._utils import _is_log_scale
16 from optuna.visualization._utils import _is_numerical
17
18
19 if _imports.is_successful():
20 from optuna.visualization._plotly_imports import go
21 from optuna.visualization._plotly_imports import make_subplots
22 from optuna.visualization._plotly_imports import Scatter
23 from optuna.visualization._utils import COLOR_SCALE
24
25 _logger = get_logger(__name__)
26
27
28 class _SliceSubplotInfo(NamedTuple):
29 param_name: str
30 x: List[Any]
31 y: List[float]
32 trial_numbers: List[int]
33 is_log: bool
34 is_numerical: bool
35
36
37 class _SlicePlotInfo(NamedTuple):
38 target_name: str
39 subplots: List[_SliceSubplotInfo]
40
41
42 def _get_slice_subplot_info(
43 trials: List[FrozenTrial],
44 param: str,
45 target: Optional[Callable[[FrozenTrial], float]],
46 log_scale: bool,
47 numerical: bool,
48 ) -> _SliceSubplotInfo:
49
50 if target is None:
51
52 def _target(t: FrozenTrial) -> float:
53 return cast(float, t.value)
54
55 target = _target
56
57 return _SliceSubplotInfo(
58 param_name=param,
59 x=[t.params[param] for t in trials if param in t.params],
60 y=[target(t) for t in trials if param in t.params],
61 trial_numbers=[t.number for t in trials if param in t.params],
62 is_log=log_scale,
63 is_numerical=numerical,
64 )
65
66
67 def _get_slice_plot_info(
68 study: Study,
69 params: Optional[List[str]],
70 target: Optional[Callable[[FrozenTrial], float]],
71 target_name: str,
72 ) -> _SlicePlotInfo:
73
74 _check_plot_args(study, target, target_name)
75
76 trials = _filter_nonfinite(
77 study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)), target=target
78 )
79
80 if len(trials) == 0:
81 _logger.warning("Your study does not have any completed trials.")
82 return _SlicePlotInfo(target_name, [])
83
84 all_params = {p_name for t in trials for p_name in t.params.keys()}
85 if params is None:
86 sorted_params = sorted(all_params)
87 else:
88 for input_p_name in params:
89 if input_p_name not in all_params:
90 raise ValueError(f"Parameter {input_p_name} does not exist in your study.")
91 sorted_params = sorted(set(params))
92
93 return _SlicePlotInfo(
94 target_name=target_name,
95 subplots=[
96 _get_slice_subplot_info(
97 trials=trials,
98 param=param,
99 target=target,
100 log_scale=_is_log_scale(trials, param),
101 numerical=_is_numerical(trials, param),
102 )
103 for param in sorted_params
104 ],
105 )
106
107
108 def plot_slice(
109 study: Study,
110 params: Optional[List[str]] = None,
111 *,
112 target: Optional[Callable[[FrozenTrial], float]] = None,
113 target_name: str = "Objective Value",
114 ) -> "go.Figure":
115 """Plot the parameter relationship as slice plot in a study.
116
117 Note that, if a parameter contains missing values, a trial with missing values is not plotted.
118
119 Example:
120
121 The following code snippet shows how to plot the parameter relationship as slice plot.
122
123 .. plotly::
124
125 import optuna
126
127
128 def objective(trial):
129 x = trial.suggest_float("x", -100, 100)
130 y = trial.suggest_categorical("y", [-1, 0, 1])
131 return x ** 2 + y
132
133
134 sampler = optuna.samplers.TPESampler(seed=10)
135 study = optuna.create_study(sampler=sampler)
136 study.optimize(objective, n_trials=10)
137
138 fig = optuna.visualization.plot_slice(study, params=["x", "y"])
139 fig.show()
140
141 Args:
142 study:
143 A :class:`~optuna.study.Study` object whose trials are plotted for their target values.
144 params:
145 Parameter list to visualize. The default is all parameters.
146 target:
147 A function to specify the value to display. If it is :obj:`None` and ``study`` is being
148 used for single-objective optimization, the objective values are plotted.
149
150 .. note::
151 Specify this argument if ``study`` is being used for multi-objective optimization.
152 target_name:
153 Target's name to display on the axis label.
154
155 Returns:
156 A :class:`plotly.graph_objs.Figure` object.
157 """
158
159 _imports.check()
160 return _get_slice_plot(_get_slice_plot_info(study, params, target, target_name))
161
162
163 def _get_slice_plot(info: _SlicePlotInfo) -> "go.Figure":
164
165 layout = go.Layout(title="Slice Plot")
166
167 if len(info.subplots) == 0:
168 return go.Figure(data=[], layout=layout)
169 elif len(info.subplots) == 1:
170 figure = go.Figure(data=[_generate_slice_subplot(info.subplots[0])], layout=layout)
171 figure.update_xaxes(title_text=info.subplots[0].param_name)
172 figure.update_yaxes(title_text=info.target_name)
173 if info.subplots[0].is_log:
174 figure.update_xaxes(type="log")
175 else:
176 figure = make_subplots(rows=1, cols=len(info.subplots), shared_yaxes=True)
177 figure.update_layout(layout)
178 showscale = True # showscale option only needs to be specified once.
179 for column_index, subplot_info in enumerate(info.subplots, start=1):
180 trace = _generate_slice_subplot(subplot_info)
181 trace.update(marker={"showscale": showscale}) # showscale's default is True.
182 if showscale:
183 showscale = False
184 figure.add_trace(trace, row=1, col=column_index)
185 figure.update_xaxes(title_text=subplot_info.param_name, row=1, col=column_index)
186 if column_index == 1:
187 figure.update_yaxes(title_text=info.target_name, row=1, col=column_index)
188 if subplot_info.is_log:
189 figure.update_xaxes(type="log", row=1, col=column_index)
190 if len(info.subplots) > 3:
191             # Ensure that each subplot has a minimum width without relying on autosizing.
192 figure.update_layout(width=300 * len(info.subplots))
193
194 return figure
195
196
197 def _generate_slice_subplot(subplot_info: _SliceSubplotInfo) -> "Scatter":
198 return go.Scatter(
199 x=subplot_info.x,
200 y=subplot_info.y,
201 mode="markers",
202 marker={
203 "line": {"width": 0.5, "color": "Grey"},
204 "color": subplot_info.trial_numbers,
205 "colorscale": COLOR_SCALE,
206 "colorbar": {
207 "title": "Trial",
208 "x": 1.0, # Offset the colorbar position with a fixed width `xpad`.
209 "xpad": 40,
210 },
211 },
212 showlegend=False,
213 )
214
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/optuna/visualization/_slice.py b/optuna/visualization/_slice.py
--- a/optuna/visualization/_slice.py
+++ b/optuna/visualization/_slice.py
@@ -195,9 +195,12 @@
def _generate_slice_subplot(subplot_info: _SliceSubplotInfo) -> "Scatter":
+ x = [x if x is not None else "None" for x in subplot_info.x]
+ y = [y if y is not None else "None" for y in subplot_info.y]
+
return go.Scatter(
- x=subplot_info.x,
- y=subplot_info.y,
+ x=x,
+ y=y,
mode="markers",
marker={
"line": {"width": 0.5, "color": "Grey"},
| {"golden_diff": "diff --git a/optuna/visualization/_slice.py b/optuna/visualization/_slice.py\n--- a/optuna/visualization/_slice.py\n+++ b/optuna/visualization/_slice.py\n@@ -195,9 +195,12 @@\n \n \n def _generate_slice_subplot(subplot_info: _SliceSubplotInfo) -> \"Scatter\":\n+ x = [x if x is not None else \"None\" for x in subplot_info.x]\n+ y = [y if y is not None else \"None\" for y in subplot_info.y]\n+\n return go.Scatter(\n- x=subplot_info.x,\n- y=subplot_info.y,\n+ x=x,\n+ y=y,\n mode=\"markers\",\n marker={\n \"line\": {\"width\": 0.5, \"color\": \"Grey\"},\n", "issue": "`None` categorical not visible on Slice plot\n### Expected behavior\r\n\r\nI have a categorical like this: `trial.suggest_categorical(\"class_weight\", [\"balanced\", None])`\r\n\r\nThe slice plot shows the \"balanced\" value but not the `None` value.\r\nI could write a workaround by using `\"None\"` as a string and then convert it to `None`\r\nbut I thing it could be nice if the real `None` is ploted.\r\n\r\nSee sceenshot:\r\n\r\n<img width=\"234\" alt=\"image\" src=\"https://user-images.githubusercontent.com/229382/199188383-981f256d-0b66-4a1c-be40-68ecd6ae4528.png\">\r\n\r\n\r\n### Environment\r\n\r\n- Optuna version:3.0.3\r\n- Python version:3.9.13\r\n- OS:Linux-5.10.0-17-amd64-x86_64-with-glibc2.31\r\n\r\n### Error messages, stack traces, or logs\r\n\r\n```shell\r\nsee screenshot\r\n```\r\n\r\n\r\n### Steps to reproduce\r\n\r\nsee description above\r\n\r\n### Additional context (optional)\r\n\r\n_No response_\n", "before_files": [{"content": "from typing import Any\nfrom typing import Callable\nfrom typing import cast\nfrom typing import List\nfrom typing import NamedTuple\nfrom typing import Optional\n\nfrom optuna.logging import get_logger\nfrom optuna.study import Study\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\nfrom optuna.visualization._plotly_imports import _imports\nfrom optuna.visualization._utils import _check_plot_args\nfrom optuna.visualization._utils import _filter_nonfinite\nfrom optuna.visualization._utils import _is_log_scale\nfrom optuna.visualization._utils import _is_numerical\n\n\nif _imports.is_successful():\n from optuna.visualization._plotly_imports import go\n from optuna.visualization._plotly_imports import make_subplots\n from optuna.visualization._plotly_imports import Scatter\n from optuna.visualization._utils import COLOR_SCALE\n\n_logger = get_logger(__name__)\n\n\nclass _SliceSubplotInfo(NamedTuple):\n param_name: str\n x: List[Any]\n y: List[float]\n trial_numbers: List[int]\n is_log: bool\n is_numerical: bool\n\n\nclass _SlicePlotInfo(NamedTuple):\n target_name: str\n subplots: List[_SliceSubplotInfo]\n\n\ndef _get_slice_subplot_info(\n trials: List[FrozenTrial],\n param: str,\n target: Optional[Callable[[FrozenTrial], float]],\n log_scale: bool,\n numerical: bool,\n) -> _SliceSubplotInfo:\n\n if target is None:\n\n def _target(t: FrozenTrial) -> float:\n return cast(float, t.value)\n\n target = _target\n\n return _SliceSubplotInfo(\n param_name=param,\n x=[t.params[param] for t in trials if param in t.params],\n y=[target(t) for t in trials if param in t.params],\n trial_numbers=[t.number for t in trials if param in t.params],\n is_log=log_scale,\n is_numerical=numerical,\n )\n\n\ndef _get_slice_plot_info(\n study: Study,\n params: Optional[List[str]],\n target: Optional[Callable[[FrozenTrial], float]],\n target_name: str,\n) -> _SlicePlotInfo:\n\n _check_plot_args(study, target, target_name)\n\n trials = _filter_nonfinite(\n 
study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)), target=target\n )\n\n if len(trials) == 0:\n _logger.warning(\"Your study does not have any completed trials.\")\n return _SlicePlotInfo(target_name, [])\n\n all_params = {p_name for t in trials for p_name in t.params.keys()}\n if params is None:\n sorted_params = sorted(all_params)\n else:\n for input_p_name in params:\n if input_p_name not in all_params:\n raise ValueError(f\"Parameter {input_p_name} does not exist in your study.\")\n sorted_params = sorted(set(params))\n\n return _SlicePlotInfo(\n target_name=target_name,\n subplots=[\n _get_slice_subplot_info(\n trials=trials,\n param=param,\n target=target,\n log_scale=_is_log_scale(trials, param),\n numerical=_is_numerical(trials, param),\n )\n for param in sorted_params\n ],\n )\n\n\ndef plot_slice(\n study: Study,\n params: Optional[List[str]] = None,\n *,\n target: Optional[Callable[[FrozenTrial], float]] = None,\n target_name: str = \"Objective Value\",\n) -> \"go.Figure\":\n \"\"\"Plot the parameter relationship as slice plot in a study.\n\n Note that, if a parameter contains missing values, a trial with missing values is not plotted.\n\n Example:\n\n The following code snippet shows how to plot the parameter relationship as slice plot.\n\n .. plotly::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -100, 100)\n y = trial.suggest_categorical(\"y\", [-1, 0, 1])\n return x ** 2 + y\n\n\n sampler = optuna.samplers.TPESampler(seed=10)\n study = optuna.create_study(sampler=sampler)\n study.optimize(objective, n_trials=10)\n\n fig = optuna.visualization.plot_slice(study, params=[\"x\", \"y\"])\n fig.show()\n\n Args:\n study:\n A :class:`~optuna.study.Study` object whose trials are plotted for their target values.\n params:\n Parameter list to visualize. The default is all parameters.\n target:\n A function to specify the value to display. If it is :obj:`None` and ``study`` is being\n used for single-objective optimization, the objective values are plotted.\n\n .. 
note::\n Specify this argument if ``study`` is being used for multi-objective optimization.\n target_name:\n Target's name to display on the axis label.\n\n Returns:\n A :class:`plotly.graph_objs.Figure` object.\n \"\"\"\n\n _imports.check()\n return _get_slice_plot(_get_slice_plot_info(study, params, target, target_name))\n\n\ndef _get_slice_plot(info: _SlicePlotInfo) -> \"go.Figure\":\n\n layout = go.Layout(title=\"Slice Plot\")\n\n if len(info.subplots) == 0:\n return go.Figure(data=[], layout=layout)\n elif len(info.subplots) == 1:\n figure = go.Figure(data=[_generate_slice_subplot(info.subplots[0])], layout=layout)\n figure.update_xaxes(title_text=info.subplots[0].param_name)\n figure.update_yaxes(title_text=info.target_name)\n if info.subplots[0].is_log:\n figure.update_xaxes(type=\"log\")\n else:\n figure = make_subplots(rows=1, cols=len(info.subplots), shared_yaxes=True)\n figure.update_layout(layout)\n showscale = True # showscale option only needs to be specified once.\n for column_index, subplot_info in enumerate(info.subplots, start=1):\n trace = _generate_slice_subplot(subplot_info)\n trace.update(marker={\"showscale\": showscale}) # showscale's default is True.\n if showscale:\n showscale = False\n figure.add_trace(trace, row=1, col=column_index)\n figure.update_xaxes(title_text=subplot_info.param_name, row=1, col=column_index)\n if column_index == 1:\n figure.update_yaxes(title_text=info.target_name, row=1, col=column_index)\n if subplot_info.is_log:\n figure.update_xaxes(type=\"log\", row=1, col=column_index)\n if len(info.subplots) > 3:\n # Ensure that each subplot has a minimum width without relying on autusizing.\n figure.update_layout(width=300 * len(info.subplots))\n\n return figure\n\n\ndef _generate_slice_subplot(subplot_info: _SliceSubplotInfo) -> \"Scatter\":\n return go.Scatter(\n x=subplot_info.x,\n y=subplot_info.y,\n mode=\"markers\",\n marker={\n \"line\": {\"width\": 0.5, \"color\": \"Grey\"},\n \"color\": subplot_info.trial_numbers,\n \"colorscale\": COLOR_SCALE,\n \"colorbar\": {\n \"title\": \"Trial\",\n \"x\": 1.0, # Offset the colorbar position with a fixed width `xpad`.\n \"xpad\": 40,\n },\n },\n showlegend=False,\n )\n", "path": "optuna/visualization/_slice.py"}], "after_files": [{"content": "from typing import Any\nfrom typing import Callable\nfrom typing import cast\nfrom typing import List\nfrom typing import NamedTuple\nfrom typing import Optional\n\nfrom optuna.logging import get_logger\nfrom optuna.study import Study\nfrom optuna.trial import FrozenTrial\nfrom optuna.trial import TrialState\nfrom optuna.visualization._plotly_imports import _imports\nfrom optuna.visualization._utils import _check_plot_args\nfrom optuna.visualization._utils import _filter_nonfinite\nfrom optuna.visualization._utils import _is_log_scale\nfrom optuna.visualization._utils import _is_numerical\n\n\nif _imports.is_successful():\n from optuna.visualization._plotly_imports import go\n from optuna.visualization._plotly_imports import make_subplots\n from optuna.visualization._plotly_imports import Scatter\n from optuna.visualization._utils import COLOR_SCALE\n\n_logger = get_logger(__name__)\n\n\nclass _SliceSubplotInfo(NamedTuple):\n param_name: str\n x: List[Any]\n y: List[float]\n trial_numbers: List[int]\n is_log: bool\n is_numerical: bool\n\n\nclass _SlicePlotInfo(NamedTuple):\n target_name: str\n subplots: List[_SliceSubplotInfo]\n\n\ndef _get_slice_subplot_info(\n trials: List[FrozenTrial],\n param: str,\n target: Optional[Callable[[FrozenTrial], float]],\n 
log_scale: bool,\n numerical: bool,\n) -> _SliceSubplotInfo:\n\n if target is None:\n\n def _target(t: FrozenTrial) -> float:\n return cast(float, t.value)\n\n target = _target\n\n return _SliceSubplotInfo(\n param_name=param,\n x=[t.params[param] for t in trials if param in t.params],\n y=[target(t) for t in trials if param in t.params],\n trial_numbers=[t.number for t in trials if param in t.params],\n is_log=log_scale,\n is_numerical=numerical,\n )\n\n\ndef _get_slice_plot_info(\n study: Study,\n params: Optional[List[str]],\n target: Optional[Callable[[FrozenTrial], float]],\n target_name: str,\n) -> _SlicePlotInfo:\n\n _check_plot_args(study, target, target_name)\n\n trials = _filter_nonfinite(\n study.get_trials(deepcopy=False, states=(TrialState.COMPLETE,)), target=target\n )\n\n if len(trials) == 0:\n _logger.warning(\"Your study does not have any completed trials.\")\n return _SlicePlotInfo(target_name, [])\n\n all_params = {p_name for t in trials for p_name in t.params.keys()}\n if params is None:\n sorted_params = sorted(all_params)\n else:\n for input_p_name in params:\n if input_p_name not in all_params:\n raise ValueError(f\"Parameter {input_p_name} does not exist in your study.\")\n sorted_params = sorted(set(params))\n\n return _SlicePlotInfo(\n target_name=target_name,\n subplots=[\n _get_slice_subplot_info(\n trials=trials,\n param=param,\n target=target,\n log_scale=_is_log_scale(trials, param),\n numerical=_is_numerical(trials, param),\n )\n for param in sorted_params\n ],\n )\n\n\ndef plot_slice(\n study: Study,\n params: Optional[List[str]] = None,\n *,\n target: Optional[Callable[[FrozenTrial], float]] = None,\n target_name: str = \"Objective Value\",\n) -> \"go.Figure\":\n \"\"\"Plot the parameter relationship as slice plot in a study.\n\n Note that, if a parameter contains missing values, a trial with missing values is not plotted.\n\n Example:\n\n The following code snippet shows how to plot the parameter relationship as slice plot.\n\n .. plotly::\n\n import optuna\n\n\n def objective(trial):\n x = trial.suggest_float(\"x\", -100, 100)\n y = trial.suggest_categorical(\"y\", [-1, 0, 1])\n return x ** 2 + y\n\n\n sampler = optuna.samplers.TPESampler(seed=10)\n study = optuna.create_study(sampler=sampler)\n study.optimize(objective, n_trials=10)\n\n fig = optuna.visualization.plot_slice(study, params=[\"x\", \"y\"])\n fig.show()\n\n Args:\n study:\n A :class:`~optuna.study.Study` object whose trials are plotted for their target values.\n params:\n Parameter list to visualize. The default is all parameters.\n target:\n A function to specify the value to display. If it is :obj:`None` and ``study`` is being\n used for single-objective optimization, the objective values are plotted.\n\n .. 
note::\n Specify this argument if ``study`` is being used for multi-objective optimization.\n target_name:\n Target's name to display on the axis label.\n\n Returns:\n A :class:`plotly.graph_objs.Figure` object.\n \"\"\"\n\n _imports.check()\n return _get_slice_plot(_get_slice_plot_info(study, params, target, target_name))\n\n\ndef _get_slice_plot(info: _SlicePlotInfo) -> \"go.Figure\":\n\n layout = go.Layout(title=\"Slice Plot\")\n\n if len(info.subplots) == 0:\n return go.Figure(data=[], layout=layout)\n elif len(info.subplots) == 1:\n figure = go.Figure(data=[_generate_slice_subplot(info.subplots[0])], layout=layout)\n figure.update_xaxes(title_text=info.subplots[0].param_name)\n figure.update_yaxes(title_text=info.target_name)\n if info.subplots[0].is_log:\n figure.update_xaxes(type=\"log\")\n else:\n figure = make_subplots(rows=1, cols=len(info.subplots), shared_yaxes=True)\n figure.update_layout(layout)\n showscale = True # showscale option only needs to be specified once.\n for column_index, subplot_info in enumerate(info.subplots, start=1):\n trace = _generate_slice_subplot(subplot_info)\n trace.update(marker={\"showscale\": showscale}) # showscale's default is True.\n if showscale:\n showscale = False\n figure.add_trace(trace, row=1, col=column_index)\n figure.update_xaxes(title_text=subplot_info.param_name, row=1, col=column_index)\n if column_index == 1:\n figure.update_yaxes(title_text=info.target_name, row=1, col=column_index)\n if subplot_info.is_log:\n figure.update_xaxes(type=\"log\", row=1, col=column_index)\n if len(info.subplots) > 3:\n # Ensure that each subplot has a minimum width without relying on autusizing.\n figure.update_layout(width=300 * len(info.subplots))\n\n return figure\n\n\ndef _generate_slice_subplot(subplot_info: _SliceSubplotInfo) -> \"Scatter\":\n x = [x if x is not None else \"None\" for x in subplot_info.x]\n y = [y if y is not None else \"None\" for y in subplot_info.y]\n\n return go.Scatter(\n x=x,\n y=y,\n mode=\"markers\",\n marker={\n \"line\": {\"width\": 0.5, \"color\": \"Grey\"},\n \"color\": subplot_info.trial_numbers,\n \"colorscale\": COLOR_SCALE,\n \"colorbar\": {\n \"title\": \"Trial\",\n \"x\": 1.0, # Offset the colorbar position with a fixed width `xpad`.\n \"xpad\": 40,\n },\n },\n showlegend=False,\n )\n", "path": "optuna/visualization/_slice.py"}]} | 2,708 | 179 |
gh_patches_debug_28308 | rasdani/github-patches | git_diff | sunpy__sunpy-5451 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Order of arguments in `scale` input to `make_fitswcs_header` is wrong in AIA/EUVI reprojection example
In [the AIA/EUVI reprojection mosaic example](https://docs.sunpy.org/en/latest/generated/gallery/map_transformations/reprojection_aia_euvi_mosaic.html), the ordering of the `scale` argument to `make_fitswcs_header` is incorrect. The ordering should be Cartesian (lon, lat) [according to the `make_fitswcs_header` docstring](https://docs.sunpy.org/en/stable/api/sunpy.map.make_fitswcs_header.html#sunpy.map.make_fitswcs_header), but in this example, the order is according to the array index. This actually has no effect on the example output as the scale in both directions is the same (1 deg/pix), but is potentially confusing and conflicts with the function docstring.
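For reference, a minimal sketch of the Cartesian (lon, lat) ordering that the docstring describes, using the output shape from the mosaic example; this is illustrative only and not code taken from the repository:

```python
import astropy.units as u

shape_out = (180, 360)  # (rows along latitude, columns along longitude)
# `scale` in Cartesian order: longitude spacing first, then latitude spacing.
scale = [360 / shape_out[1],  # deg/pix along longitude
         180 / shape_out[0]] * u.deg / u.pix  # deg/pix along latitude
```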
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/map_transformations/reprojection_heliographic_stonyhurst.py`
Content:
```
1 """
2 ===========================
3 Creating a Heliographic Map
4 ===========================
5
6 In this example we use the `reproject` generate an image in heliographic coordinates from an AIA image.
7
8 You will need `reproject <https://reproject.readthedocs.io/en/stable/>`__ v0.6 or higher installed.
9 """
10 # sphinx_gallery_thumbnail_number = 2
11
12 import matplotlib.pyplot as plt
13 from reproject import reproject_interp
14
15 import astropy.units as u
16 from astropy.coordinates import SkyCoord
17 from astropy.wcs import WCS
18
19 import sunpy.data.sample
20 import sunpy.map
21
22 ###############################################################################
23 # We will start with using sunpy's sample data for this example.
24
25 aia_map = sunpy.map.Map(sunpy.data.sample.AIA_193_IMAGE)
26
27 fig = plt.figure()
28 ax = plt.subplot(projection=aia_map)
29 aia_map.plot(ax)
30
31 ###############################################################################
32 # Reproject works by transforming an input image (with a `~astropy.wcs.WCS`) to
33 # a output image, specified by a different WCS object. Therefore we need to
34 # build a `~astropy.wcs.WCS` object describing the output we desire.
35 # To do this we use the `sunpy.map.make_fitswcs_header` which assists us in
36 # constructing this World Coordinate System (WCS) object.
37 # Here we create a WCS based on a heliographic
38 # Stonyhurst reference coordinate and with the CAR (plate carree) projection.
39
40 shape_out = [720, 1440]
41 frame_out = SkyCoord(0, 0, unit=u.deg,
42 frame="heliographic_stonyhurst",
43 obstime=aia_map.date)
44 header = sunpy.map.make_fitswcs_header(shape_out,
45 frame_out,
46 scale=[180 / shape_out[0],
47 360 / shape_out[1]] * u.deg / u.pix,
48 projection_code="CAR")
49
50 out_wcs = WCS(header)
51
52 ###############################################################################
53 # With the new header, re-project the data into the new coordinate system.
54 # Here we are using the fastest but least accurate method of reprojection,
55 # `reproject.reproject_interp`, a more accurate but slower method is
56 # `reproject.reproject_adaptive`.
57
58 array, footprint = reproject_interp(aia_map, out_wcs, shape_out=shape_out)
59 outmap = sunpy.map.Map((array, header))
60 outmap.plot_settings = aia_map.plot_settings
61
62 ###############################################################################
63 # Plot the result.
64
65 fig = plt.figure()
66 ax = plt.subplot(projection=outmap)
67 outmap.plot(ax)
68
69 ax.set_xlim(0, shape_out[1])
70 ax.set_ylim(0, shape_out[0])
71
72 plt.show()
73
```
Path: `examples/map_transformations/reprojection_aia_euvi_mosaic.py`
Content:
```
1 """
2 =========================================
3 Creating a Full Sun Map with AIA and EUVI
4 =========================================
5
6 With SDO/AIA and STEREO/A and STEREO/B, it is now possible (given specific dates)
7 to combine three EUV images from these satellites
8 to produce a full latitude / longitude map of the Sun.
9
10 You will need an active internet connection as well as
11 `reproject <https://reproject.readthedocs.io/en/stable/>`__ v0.6 or higher installed.
12 """
13 # sphinx_gallery_thumbnail_number = 4
14
15 import matplotlib.pyplot as plt
16 import numpy as np
17 from reproject import reproject_interp
18 from reproject.mosaicking import reproject_and_coadd
19
20 import astropy.units as u
21 from astropy.coordinates import SkyCoord
22 from astropy.wcs import WCS
23
24 import sunpy.map
25 import sunpy.sun
26 from sunpy.coordinates import get_body_heliographic_stonyhurst
27 from sunpy.net import Fido
28 from sunpy.net import attrs as a
29
30 ######################################################################
31 # To get started, let's download the data:
32
33 stereo = (a.Instrument("EUVI") &
34 a.Time('2011-11-01', '2011-11-01T00:10:00'))
35 aia = (a.Instrument.aia &
36 a.Sample(24 * u.hour) &
37 a.Time('2011-11-01', '2011-11-02'))
38 wave = a.Wavelength(19.5 * u.nm, 19.5 * u.nm)
39 res = Fido.search(wave, aia | stereo)
40 files = Fido.fetch(res)
41
42 ######################################################################
43 # Next we create a sunpy map for each of the files.
44
45 maps = sunpy.map.Map(sorted(files))
46
47 ######################################################################
48 # To reduce memory consumption we also downsample these maps before continuing,
49 # you can disable this.
50
51 maps = [m.resample((1024, 1024)*u.pix) for m in maps]
52
53 ######################################################################
54 # When combining these images all three need to assume the same radius of
55 # the Sun for the data. The AIA images specify a slightly different value
56 # than the IAU 2015 constant. To avoid coordinate transformation issues we
57 # reset this here.
58
59 maps[0].meta['rsun_ref'] = sunpy.sun.constants.radius.to_value(u.m)
60
61 ######################################################################
62 # Next we will plot the locations of the three spacecraft with respect to
63 # the Sun so we can easily see the relative separations.
64
65 earth = get_body_heliographic_stonyhurst('earth', maps[0].date)
66
67 plt.figure(figsize=(8, 8))
68 r_unit = u.AU
69
70 ax = plt.subplot(projection='polar')
71 circle = plt.Circle((0.0, 0.0), (10*u.Rsun).to_value(r_unit),
72 transform=ax.transProjectionAffine + ax.transAxes, color="yellow",
73 alpha=1, label="Sun")
74 ax.add_artist(circle)
75 ax.text(earth.lon.to_value("rad")+0.05, earth.radius.to_value(r_unit), "Earth")
76
77 for this_satellite, this_coord in [(m.observatory, m.observer_coordinate) for m in maps]:
78 ax.plot(this_coord.lon.to('rad'), this_coord.radius.to(r_unit), 'o', label=this_satellite)
79
80 ax.set_theta_zero_location("S")
81 ax.set_rlim(0, 1.3)
82
83 ax.legend()
84
85 plt.show()
86
87 ######################################################################
88 # The next step is to calculate the output coordinate system for the combined
89 # map. We select a heliographic Stonyhurst frame, and a Plate Carree (CAR)
90 # projection, and generate a header using `sunpy.map.make_fitswcs_header` and
91 # then construct a World Coordinate System (WCS) object for that header.
92
93 shape_out = (180, 360) # This is set deliberately low to reduce memory consumption
94 header = sunpy.map.make_fitswcs_header(shape_out,
95 SkyCoord(0, 0, unit=u.deg,
96 frame="heliographic_stonyhurst",
97 obstime=maps[0].date),
98 scale=[180 / shape_out[0],
99 360 / shape_out[1]] * u.deg / u.pix,
100 wavelength=int(maps[0].meta['wavelnth']) * u.AA,
101 projection_code="CAR")
102 out_wcs = WCS(header)
103
104 ######################################################################
105 # Next we call the `reproject.mosaicking.reproject_and_coadd` function, which
106 # takes a list of maps, and the desired output WCS and array shape.
107
108 array, footprint = reproject_and_coadd(maps, out_wcs, shape_out,
109 reproject_function=reproject_interp)
110
111 ######################################################################
112 # To display the output we construct a new map using the new array and our
113 # generated header. We also borrow the plot settings from the AIA map.
114
115 outmap = sunpy.map.Map((array, header))
116 outmap.plot_settings = maps[0].plot_settings
117 outmap.plot()
118
119 plt.show()
120
121 ######################################################################
122 # Improving the Output
123 # --------------------
124 #
125 # As you can see this leaves a little to be desired. To reduce the obvious
126 # warping towards the points which are close to the limb in the input
127 # images, we can define a set of weights to use when co-adding the output
128 # arrays. To reduce this warping we want to calculate an set of weights
129 # which highly weigh points close to the centre of the disk in the input
130 # image.
131 #
132 # We can achieve this by using sunpy's coordinate framework. First we
133 # calculate all the world coordinates for all the pixels in all three
134 # input maps.
135
136 coordinates = tuple(map(sunpy.map.all_coordinates_from_map, maps))
137
138 ######################################################################
139 # To get a weighting which is high close to disk centre and low towards
140 # the limb, we can use the Z coordinate in the heliocentric frame. This
141 # coordinate is the distance of the sphere from the centre of the Sun
142 # towards the observer.
143
144 weights = [coord.transform_to("heliocentric").z.value for coord in coordinates]
145
146 ######################################################################
147 # These weights are good, but they are better if the ramp down is a little
148 # smoother, and more biased to the centre. Also we can scale them to the
149 # range 0-1, and set any off disk (NaN) regions to 0.
150
151 weights = [(w / np.nanmax(w)) ** 3 for w in weights]
152 for w in weights:
153 w[np.isnan(w)] = 0
154
155 plt.figure()
156 plt.imshow(weights[0])
157 plt.colorbar()
158
159 plt.show()
160
161 ######################################################################
162 # Now we can rerun the reprojection. This time we also set
163 # ``match_background=True`` which scales the images by a single scaling
164 # factor so they are of similar brightness. We also set
165 # ``background_reference=0`` which uses the AIA map as the reference for
166 # the background scaling.
167 #
168 # Here we are using the fastest but least accurate method of reprojection,
169 # `reproject.reproject_interp`, a more accurate but slower method is
170 # `reproject.reproject_adaptive`.
171
172 array, _ = reproject_and_coadd(maps, out_wcs, shape_out,
173 input_weights=weights,
174 reproject_function=reproject_interp,
175 match_background=True,
176 background_reference=0)
177
178 ######################################################################
179 # Once again we create a new map, and this time we customise the plot a
180 # little.
181
182 outmap = sunpy.map.Map((array, header))
183 outmap.plot_settings = maps[0].plot_settings
184 outmap.nickname = 'AIA + EUVI/A + EUVI/B'
185
186 plt.figure(figsize=(10, 5))
187 ax = plt.subplot(projection=out_wcs)
188 im = outmap.plot(vmin=400)
189
190 lon, lat = ax.coords
191 lon.set_coord_type("longitude")
192 lon.coord_wrap = 180
193 lon.set_format_unit(u.deg)
194 lat.set_coord_type("latitude")
195 lat.set_format_unit(u.deg)
196
197 lon.set_axislabel('Heliographic Longitude', minpad=0.8)
198 lat.set_axislabel('Heliographic Latitude', minpad=0.9)
199 lon.set_ticks(spacing=25*u.deg, color='k')
200 lat.set_ticks(spacing=15*u.deg, color='k')
201
202 plt.colorbar(im, ax=ax)
203
204 # Reset the view to pixel centers
205 _ = ax.axis((0, shape_out[1], 0, shape_out[0]))
206
207 plt.show()
208
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/examples/map_transformations/reprojection_aia_euvi_mosaic.py b/examples/map_transformations/reprojection_aia_euvi_mosaic.py
--- a/examples/map_transformations/reprojection_aia_euvi_mosaic.py
+++ b/examples/map_transformations/reprojection_aia_euvi_mosaic.py
@@ -95,8 +95,8 @@
SkyCoord(0, 0, unit=u.deg,
frame="heliographic_stonyhurst",
obstime=maps[0].date),
- scale=[180 / shape_out[0],
- 360 / shape_out[1]] * u.deg / u.pix,
+ scale=[360 / shape_out[1],
+ 180 / shape_out[0]] * u.deg / u.pix,
wavelength=int(maps[0].meta['wavelnth']) * u.AA,
projection_code="CAR")
out_wcs = WCS(header)
diff --git a/examples/map_transformations/reprojection_heliographic_stonyhurst.py b/examples/map_transformations/reprojection_heliographic_stonyhurst.py
--- a/examples/map_transformations/reprojection_heliographic_stonyhurst.py
+++ b/examples/map_transformations/reprojection_heliographic_stonyhurst.py
@@ -43,8 +43,8 @@
obstime=aia_map.date)
header = sunpy.map.make_fitswcs_header(shape_out,
frame_out,
- scale=[180 / shape_out[0],
- 360 / shape_out[1]] * u.deg / u.pix,
+ scale=[360 / shape_out[1],
+ 180 / shape_out[0]] * u.deg / u.pix,
projection_code="CAR")
out_wcs = WCS(header)
| {"golden_diff": "diff --git a/examples/map_transformations/reprojection_aia_euvi_mosaic.py b/examples/map_transformations/reprojection_aia_euvi_mosaic.py\n--- a/examples/map_transformations/reprojection_aia_euvi_mosaic.py\n+++ b/examples/map_transformations/reprojection_aia_euvi_mosaic.py\n@@ -95,8 +95,8 @@\n SkyCoord(0, 0, unit=u.deg,\n frame=\"heliographic_stonyhurst\",\n obstime=maps[0].date),\n- scale=[180 / shape_out[0],\n- 360 / shape_out[1]] * u.deg / u.pix,\n+ scale=[360 / shape_out[1],\n+ 180 / shape_out[0]] * u.deg / u.pix,\n wavelength=int(maps[0].meta['wavelnth']) * u.AA,\n projection_code=\"CAR\")\n out_wcs = WCS(header)\ndiff --git a/examples/map_transformations/reprojection_heliographic_stonyhurst.py b/examples/map_transformations/reprojection_heliographic_stonyhurst.py\n--- a/examples/map_transformations/reprojection_heliographic_stonyhurst.py\n+++ b/examples/map_transformations/reprojection_heliographic_stonyhurst.py\n@@ -43,8 +43,8 @@\n obstime=aia_map.date)\n header = sunpy.map.make_fitswcs_header(shape_out,\n frame_out,\n- scale=[180 / shape_out[0],\n- 360 / shape_out[1]] * u.deg / u.pix,\n+ scale=[360 / shape_out[1],\n+ 180 / shape_out[0]] * u.deg / u.pix,\n projection_code=\"CAR\")\n \n out_wcs = WCS(header)\n", "issue": "Order of arguments in `scale` input to `make_fitswcs_header` is wrong in AIA/EUVI reprojection example\nIn [the AIA/EUVI reprojection mosaic example](https://docs.sunpy.org/en/latest/generated/gallery/map_transformations/reprojection_aia_euvi_mosaic.html), the ordering of the `scale` argument to `make_fitswcs_header` is incorrect. The ordering should be Cartesian (lon, lat) [according to the `make_fitswcs_header` docstring](https://docs.sunpy.org/en/stable/api/sunpy.map.make_fitswcs_header.html#sunpy.map.make_fitswcs_header), but in this example, the order is according to the array index. This actually has no effect on the example output as the scale in both directions is the same (1 deg/pix), but is potentially confusing and conflicts with the function docstring.\n", "before_files": [{"content": "\"\"\"\n===========================\nCreating a Heliographic Map\n===========================\n\nIn this example we use the `reproject` generate an image in heliographic coordinates from an AIA image.\n\nYou will need `reproject <https://reproject.readthedocs.io/en/stable/>`__ v0.6 or higher installed.\n\"\"\"\n# sphinx_gallery_thumbnail_number = 2\n\nimport matplotlib.pyplot as plt\nfrom reproject import reproject_interp\n\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.wcs import WCS\n\nimport sunpy.data.sample\nimport sunpy.map\n\n###############################################################################\n# We will start with using sunpy's sample data for this example.\n\naia_map = sunpy.map.Map(sunpy.data.sample.AIA_193_IMAGE)\n\nfig = plt.figure()\nax = plt.subplot(projection=aia_map)\naia_map.plot(ax)\n\n###############################################################################\n# Reproject works by transforming an input image (with a `~astropy.wcs.WCS`) to\n# a output image, specified by a different WCS object. 
Therefore we need to\n# build a `~astropy.wcs.WCS` object describing the output we desire.\n# To do this we use the `sunpy.map.make_fitswcs_header` which assists us in\n# constructing this World Coordinate System (WCS) object.\n# Here we create a WCS based on a heliographic\n# Stonyhurst reference coordinate and with the CAR (plate carree) projection.\n\nshape_out = [720, 1440]\nframe_out = SkyCoord(0, 0, unit=u.deg,\n frame=\"heliographic_stonyhurst\",\n obstime=aia_map.date)\nheader = sunpy.map.make_fitswcs_header(shape_out,\n frame_out,\n scale=[180 / shape_out[0],\n 360 / shape_out[1]] * u.deg / u.pix,\n projection_code=\"CAR\")\n\nout_wcs = WCS(header)\n\n###############################################################################\n# With the new header, re-project the data into the new coordinate system.\n# Here we are using the fastest but least accurate method of reprojection,\n# `reproject.reproject_interp`, a more accurate but slower method is\n# `reproject.reproject_adaptive`.\n\narray, footprint = reproject_interp(aia_map, out_wcs, shape_out=shape_out)\noutmap = sunpy.map.Map((array, header))\noutmap.plot_settings = aia_map.plot_settings\n\n###############################################################################\n# Plot the result.\n\nfig = plt.figure()\nax = plt.subplot(projection=outmap)\noutmap.plot(ax)\n\nax.set_xlim(0, shape_out[1])\nax.set_ylim(0, shape_out[0])\n\nplt.show()\n", "path": "examples/map_transformations/reprojection_heliographic_stonyhurst.py"}, {"content": "\"\"\"\n=========================================\nCreating a Full Sun Map with AIA and EUVI\n=========================================\n\nWith SDO/AIA and STEREO/A and STEREO/B, it is now possible (given specific dates)\nto combine combine three EUV images from these satellites\nto produce a full latitude / longitude map of the Sun.\n\nYou will need an active internet connection as well as\n`reproject <https://reproject.readthedocs.io/en/stable/>`__ v0.6 or higher installed.\n\"\"\"\n# sphinx_gallery_thumbnail_number = 4\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom reproject import reproject_interp\nfrom reproject.mosaicking import reproject_and_coadd\n\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.wcs import WCS\n\nimport sunpy.map\nimport sunpy.sun\nfrom sunpy.coordinates import get_body_heliographic_stonyhurst\nfrom sunpy.net import Fido\nfrom sunpy.net import attrs as a\n\n######################################################################\n# To get started, let's download the data:\n\nstereo = (a.Instrument(\"EUVI\") &\n a.Time('2011-11-01', '2011-11-01T00:10:00'))\naia = (a.Instrument.aia &\n a.Sample(24 * u.hour) &\n a.Time('2011-11-01', '2011-11-02'))\nwave = a.Wavelength(19.5 * u.nm, 19.5 * u.nm)\nres = Fido.search(wave, aia | stereo)\nfiles = Fido.fetch(res)\n\n######################################################################\n# Next we create a sunpy map for each of the files.\n\nmaps = sunpy.map.Map(sorted(files))\n\n######################################################################\n# To reduce memory consumption we also downsample these maps before continuing,\n# you can disable this.\n\nmaps = [m.resample((1024, 1024)*u.pix) for m in maps]\n\n######################################################################\n# When combining these images all three need to assume the same radius of\n# the Sun for the data. The AIA images specify a slightly different value\n# than the IAU 2015 constant. 
To avoid coordinate transformation issues we\n# reset this here.\n\nmaps[0].meta['rsun_ref'] = sunpy.sun.constants.radius.to_value(u.m)\n\n######################################################################\n# Next we will plot the locations of the three spacecraft with respect to\n# the Sun so we can easily see the relative separations.\n\nearth = get_body_heliographic_stonyhurst('earth', maps[0].date)\n\nplt.figure(figsize=(8, 8))\nr_unit = u.AU\n\nax = plt.subplot(projection='polar')\ncircle = plt.Circle((0.0, 0.0), (10*u.Rsun).to_value(r_unit),\n transform=ax.transProjectionAffine + ax.transAxes, color=\"yellow\",\n alpha=1, label=\"Sun\")\nax.add_artist(circle)\nax.text(earth.lon.to_value(\"rad\")+0.05, earth.radius.to_value(r_unit), \"Earth\")\n\nfor this_satellite, this_coord in [(m.observatory, m.observer_coordinate) for m in maps]:\n ax.plot(this_coord.lon.to('rad'), this_coord.radius.to(r_unit), 'o', label=this_satellite)\n\nax.set_theta_zero_location(\"S\")\nax.set_rlim(0, 1.3)\n\nax.legend()\n\nplt.show()\n\n######################################################################\n# The next step is to calculate the output coordinate system for the combined\n# map. We select a heliographic Stonyhurst frame, and a Plate Carree (CAR)\n# projection, and generate a header using `sunpy.map.make_fitswcs_header` and\n# then construct a World Coordinate System (WCS) object for that header.\n\nshape_out = (180, 360) # This is set deliberately low to reduce memory consumption\nheader = sunpy.map.make_fitswcs_header(shape_out,\n SkyCoord(0, 0, unit=u.deg,\n frame=\"heliographic_stonyhurst\",\n obstime=maps[0].date),\n scale=[180 / shape_out[0],\n 360 / shape_out[1]] * u.deg / u.pix,\n wavelength=int(maps[0].meta['wavelnth']) * u.AA,\n projection_code=\"CAR\")\nout_wcs = WCS(header)\n\n######################################################################\n# Next we call the `reproject.mosaicking.reproject_and_coadd` function, which\n# takes a list of maps, and the desired output WCS and array shape.\n\narray, footprint = reproject_and_coadd(maps, out_wcs, shape_out,\n reproject_function=reproject_interp)\n\n######################################################################\n# To display the output we construct a new map using the new array and our\n# generated header. We also borrow the plot settings from the AIA map.\n\noutmap = sunpy.map.Map((array, header))\noutmap.plot_settings = maps[0].plot_settings\noutmap.plot()\n\nplt.show()\n\n######################################################################\n# Improving the Output\n# --------------------\n#\n# As you can see this leaves a little to be desired. To reduce the obvious\n# warping towards the points which are close to the limb in the input\n# images, we can define a set of weights to use when co-adding the output\n# arrays. To reduce this warping we want to calculate an set of weights\n# which highly weigh points close to the centre of the disk in the input\n# image.\n#\n# We can achieve this by using sunpy's coordinate framework. First we\n# calculate all the world coordinates for all the pixels in all three\n# input maps.\n\ncoordinates = tuple(map(sunpy.map.all_coordinates_from_map, maps))\n\n######################################################################\n# To get a weighting which is high close to disk centre and low towards\n# the limb, we can use the Z coordinate in the heliocentric frame. 
This\n# coordinate is the distance of the sphere from the centre of the Sun\n# towards the observer.\n\nweights = [coord.transform_to(\"heliocentric\").z.value for coord in coordinates]\n\n######################################################################\n# These weights are good, but they are better if the ramp down is a little\n# smoother, and more biased to the centre. Also we can scale them to the\n# range 0-1, and set any off disk (NaN) regions to 0.\n\nweights = [(w / np.nanmax(w)) ** 3 for w in weights]\nfor w in weights:\n w[np.isnan(w)] = 0\n\nplt.figure()\nplt.imshow(weights[0])\nplt.colorbar()\n\nplt.show()\n\n######################################################################\n# Now we can rerun the reprojection. This time we also set\n# ``match_background=True`` which scales the images by a single scaling\n# factor so they are of similar brightness. We also set\n# ``background_reference=0`` which uses the AIA map as the reference for\n# the background scaling.\n#\n# Here we are using the fastest but least accurate method of reprojection,\n# `reproject.reproject_interp`, a more accurate but slower method is\n# `reproject.reproject_adaptive`.\n\narray, _ = reproject_and_coadd(maps, out_wcs, shape_out,\n input_weights=weights,\n reproject_function=reproject_interp,\n match_background=True,\n background_reference=0)\n\n######################################################################\n# Once again we create a new map, and this time we customise the plot a\n# little.\n\noutmap = sunpy.map.Map((array, header))\noutmap.plot_settings = maps[0].plot_settings\noutmap.nickname = 'AIA + EUVI/A + EUVI/B'\n\nplt.figure(figsize=(10, 5))\nax = plt.subplot(projection=out_wcs)\nim = outmap.plot(vmin=400)\n\nlon, lat = ax.coords\nlon.set_coord_type(\"longitude\")\nlon.coord_wrap = 180\nlon.set_format_unit(u.deg)\nlat.set_coord_type(\"latitude\")\nlat.set_format_unit(u.deg)\n\nlon.set_axislabel('Heliographic Longitude', minpad=0.8)\nlat.set_axislabel('Heliographic Latitude', minpad=0.9)\nlon.set_ticks(spacing=25*u.deg, color='k')\nlat.set_ticks(spacing=15*u.deg, color='k')\n\nplt.colorbar(im, ax=ax)\n\n# Reset the view to pixel centers\n_ = ax.axis((0, shape_out[1], 0, shape_out[0]))\n\nplt.show()\n", "path": "examples/map_transformations/reprojection_aia_euvi_mosaic.py"}], "after_files": [{"content": "\"\"\"\n===========================\nCreating a Heliographic Map\n===========================\n\nIn this example we use the `reproject` generate an image in heliographic coordinates from an AIA image.\n\nYou will need `reproject <https://reproject.readthedocs.io/en/stable/>`__ v0.6 or higher installed.\n\"\"\"\n# sphinx_gallery_thumbnail_number = 2\n\nimport matplotlib.pyplot as plt\nfrom reproject import reproject_interp\n\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.wcs import WCS\n\nimport sunpy.data.sample\nimport sunpy.map\n\n###############################################################################\n# We will start with using sunpy's sample data for this example.\n\naia_map = sunpy.map.Map(sunpy.data.sample.AIA_193_IMAGE)\n\nfig = plt.figure()\nax = plt.subplot(projection=aia_map)\naia_map.plot(ax)\n\n###############################################################################\n# Reproject works by transforming an input image (with a `~astropy.wcs.WCS`) to\n# a output image, specified by a different WCS object. 
Therefore we need to\n# build a `~astropy.wcs.WCS` object describing the output we desire.\n# To do this we use the `sunpy.map.make_fitswcs_header` which assists us in\n# constructing this World Coordinate System (WCS) object.\n# Here we create a WCS based on a heliographic\n# Stonyhurst reference coordinate and with the CAR (plate carree) projection.\n\nshape_out = [720, 1440]\nframe_out = SkyCoord(0, 0, unit=u.deg,\n frame=\"heliographic_stonyhurst\",\n obstime=aia_map.date)\nheader = sunpy.map.make_fitswcs_header(shape_out,\n frame_out,\n scale=[360 / shape_out[1],\n 180 / shape_out[0]] * u.deg / u.pix,\n projection_code=\"CAR\")\n\nout_wcs = WCS(header)\n\n###############################################################################\n# With the new header, re-project the data into the new coordinate system.\n# Here we are using the fastest but least accurate method of reprojection,\n# `reproject.reproject_interp`, a more accurate but slower method is\n# `reproject.reproject_adaptive`.\n\narray, footprint = reproject_interp(aia_map, out_wcs, shape_out=shape_out)\noutmap = sunpy.map.Map((array, header))\noutmap.plot_settings = aia_map.plot_settings\n\n###############################################################################\n# Plot the result.\n\nfig = plt.figure()\nax = plt.subplot(projection=outmap)\noutmap.plot(ax)\n\nax.set_xlim(0, shape_out[1])\nax.set_ylim(0, shape_out[0])\n\nplt.show()\n", "path": "examples/map_transformations/reprojection_heliographic_stonyhurst.py"}, {"content": "\"\"\"\n=========================================\nCreating a Full Sun Map with AIA and EUVI\n=========================================\n\nWith SDO/AIA and STEREO/A and STEREO/B, it is now possible (given specific dates)\nto combine combine three EUV images from these satellites\nto produce a full latitude / longitude map of the Sun.\n\nYou will need an active internet connection as well as\n`reproject <https://reproject.readthedocs.io/en/stable/>`__ v0.6 or higher installed.\n\"\"\"\n# sphinx_gallery_thumbnail_number = 4\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom reproject import reproject_interp\nfrom reproject.mosaicking import reproject_and_coadd\n\nimport astropy.units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.wcs import WCS\n\nimport sunpy.map\nimport sunpy.sun\nfrom sunpy.coordinates import get_body_heliographic_stonyhurst\nfrom sunpy.net import Fido\nfrom sunpy.net import attrs as a\n\n######################################################################\n# To get started, let's download the data:\n\nstereo = (a.Instrument(\"EUVI\") &\n a.Time('2011-11-01', '2011-11-01T00:10:00'))\naia = (a.Instrument.aia &\n a.Sample(24 * u.hour) &\n a.Time('2011-11-01', '2011-11-02'))\nwave = a.Wavelength(19.5 * u.nm, 19.5 * u.nm)\nres = Fido.search(wave, aia | stereo)\nfiles = Fido.fetch(res)\n\n######################################################################\n# Next we create a sunpy map for each of the files.\n\nmaps = sunpy.map.Map(sorted(files))\n\n######################################################################\n# To reduce memory consumption we also downsample these maps before continuing,\n# you can disable this.\n\nmaps = [m.resample((1024, 1024)*u.pix) for m in maps]\n\n######################################################################\n# When combining these images all three need to assume the same radius of\n# the Sun for the data. The AIA images specify a slightly different value\n# than the IAU 2015 constant. 
To avoid coordinate transformation issues we\n# reset this here.\n\nmaps[0].meta['rsun_ref'] = sunpy.sun.constants.radius.to_value(u.m)\n\n######################################################################\n# Next we will plot the locations of the three spacecraft with respect to\n# the Sun so we can easily see the relative separations.\n\nearth = get_body_heliographic_stonyhurst('earth', maps[0].date)\n\nplt.figure(figsize=(8, 8))\nr_unit = u.AU\n\nax = plt.subplot(projection='polar')\ncircle = plt.Circle((0.0, 0.0), (10*u.Rsun).to_value(r_unit),\n transform=ax.transProjectionAffine + ax.transAxes, color=\"yellow\",\n alpha=1, label=\"Sun\")\nax.add_artist(circle)\nax.text(earth.lon.to_value(\"rad\")+0.05, earth.radius.to_value(r_unit), \"Earth\")\n\nfor this_satellite, this_coord in [(m.observatory, m.observer_coordinate) for m in maps]:\n ax.plot(this_coord.lon.to('rad'), this_coord.radius.to(r_unit), 'o', label=this_satellite)\n\nax.set_theta_zero_location(\"S\")\nax.set_rlim(0, 1.3)\n\nax.legend()\n\nplt.show()\n\n######################################################################\n# The next step is to calculate the output coordinate system for the combined\n# map. We select a heliographic Stonyhurst frame, and a Plate Carree (CAR)\n# projection, and generate a header using `sunpy.map.make_fitswcs_header` and\n# then construct a World Coordinate System (WCS) object for that header.\n\nshape_out = (180, 360) # This is set deliberately low to reduce memory consumption\nheader = sunpy.map.make_fitswcs_header(shape_out,\n SkyCoord(0, 0, unit=u.deg,\n frame=\"heliographic_stonyhurst\",\n obstime=maps[0].date),\n scale=[360 / shape_out[1],\n 180 / shape_out[0]] * u.deg / u.pix,\n wavelength=int(maps[0].meta['wavelnth']) * u.AA,\n projection_code=\"CAR\")\nout_wcs = WCS(header)\n\n######################################################################\n# Next we call the `reproject.mosaicking.reproject_and_coadd` function, which\n# takes a list of maps, and the desired output WCS and array shape.\n\narray, footprint = reproject_and_coadd(maps, out_wcs, shape_out,\n reproject_function=reproject_interp)\n\n######################################################################\n# To display the output we construct a new map using the new array and our\n# generated header. We also borrow the plot settings from the AIA map.\n\noutmap = sunpy.map.Map((array, header))\noutmap.plot_settings = maps[0].plot_settings\noutmap.plot()\n\nplt.show()\n\n######################################################################\n# Improving the Output\n# --------------------\n#\n# As you can see this leaves a little to be desired. To reduce the obvious\n# warping towards the points which are close to the limb in the input\n# images, we can define a set of weights to use when co-adding the output\n# arrays. To reduce this warping we want to calculate an set of weights\n# which highly weigh points close to the centre of the disk in the input\n# image.\n#\n# We can achieve this by using sunpy's coordinate framework. First we\n# calculate all the world coordinates for all the pixels in all three\n# input maps.\n\ncoordinates = tuple(map(sunpy.map.all_coordinates_from_map, maps))\n\n######################################################################\n# To get a weighting which is high close to disk centre and low towards\n# the limb, we can use the Z coordinate in the heliocentric frame. 
This\n# coordinate is the distance of the sphere from the centre of the Sun\n# towards the observer.\n\nweights = [coord.transform_to(\"heliocentric\").z.value for coord in coordinates]\n\n######################################################################\n# These weights are good, but they are better if the ramp down is a little\n# smoother, and more biased to the centre. Also we can scale them to the\n# range 0-1, and set any off disk (NaN) regions to 0.\n\nweights = [(w / np.nanmax(w)) ** 3 for w in weights]\nfor w in weights:\n w[np.isnan(w)] = 0\n\nplt.figure()\nplt.imshow(weights[0])\nplt.colorbar()\n\nplt.show()\n\n######################################################################\n# Now we can rerun the reprojection. This time we also set\n# ``match_background=True`` which scales the images by a single scaling\n# factor so they are of similar brightness. We also set\n# ``background_reference=0`` which uses the AIA map as the reference for\n# the background scaling.\n#\n# Here we are using the fastest but least accurate method of reprojection,\n# `reproject.reproject_interp`, a more accurate but slower method is\n# `reproject.reproject_adaptive`.\n\narray, _ = reproject_and_coadd(maps, out_wcs, shape_out,\n input_weights=weights,\n reproject_function=reproject_interp,\n match_background=True,\n background_reference=0)\n\n######################################################################\n# Once again we create a new map, and this time we customise the plot a\n# little.\n\noutmap = sunpy.map.Map((array, header))\noutmap.plot_settings = maps[0].plot_settings\noutmap.nickname = 'AIA + EUVI/A + EUVI/B'\n\nplt.figure(figsize=(10, 5))\nax = plt.subplot(projection=out_wcs)\nim = outmap.plot(vmin=400)\n\nlon, lat = ax.coords\nlon.set_coord_type(\"longitude\")\nlon.coord_wrap = 180\nlon.set_format_unit(u.deg)\nlat.set_coord_type(\"latitude\")\nlat.set_format_unit(u.deg)\n\nlon.set_axislabel('Heliographic Longitude', minpad=0.8)\nlat.set_axislabel('Heliographic Latitude', minpad=0.9)\nlon.set_ticks(spacing=25*u.deg, color='k')\nlat.set_ticks(spacing=15*u.deg, color='k')\n\nplt.colorbar(im, ax=ax)\n\n# Reset the view to pixel centers\n_ = ax.axis((0, shape_out[1], 0, shape_out[0]))\n\nplt.show()\n", "path": "examples/map_transformations/reprojection_aia_euvi_mosaic.py"}]} | 3,580 | 388 |
gh_patches_debug_16989 | rasdani/github-patches | git_diff | gpodder__mygpo-493 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
API: Device Synchronization API - Start / Stop Sync returning HTTP status 500
During my work on PR https://github.com/gpodder/mygpo/pull/122 I was testing the Device Synchronization API - Start / Stop Sync (https://gpoddernet.readthedocs.io/en/latest/api/reference/sync.html#post--api-2-sync-devices-(username).json)
I sent the following request
```json
{
"synchronize": [
[
"my-desktop", "cellphone"
]
]
}
```
and it is returning HTTP 500
```html
<html>
<head>
<title>500 Internal server error (gpodder.net)</title>
<link rel="stylesheet" type="text/css" href="/static/css/fail.css" />
</head>
<body>
<div id="c">
<div id="fail">
<h1>500 - Internal server error.</h1>
<p>
The service is currently overloaded.
Please try again later or contact us.
</p>
</div>
</div>
<img id="icon" src="/static/failpodder.png">
</body>
</html>
```
As a reference, a previous call to https://gpoddernet.readthedocs.io/en/latest/api/reference/sync.html#get--api-2-sync-devices-(username).json was returning:
```json
{
"synchronized": [],
"not-synchronized": [
"cellphone",
"my-desktop"
]
}
```
I'm able to sync these devices on the web UI, though.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mygpo/api/advanced/sync.py`
Content:
```
1 from django.http import HttpResponseBadRequest, HttpResponseNotFound
2 from django.views.decorators.csrf import csrf_exempt
3 from django.views.decorators.cache import never_cache
4
5 from mygpo.decorators import allowed_methods, cors_origin
6 from mygpo.utils import parse_request_body
7 from mygpo.api.basic_auth import require_valid_user, check_username
8 from mygpo.api.httpresponse import JsonResponse
9 from mygpo.users.models import Client, UserProxy
10 from mygpo.users.tasks import sync_user
11
12
13 @csrf_exempt
14 @require_valid_user
15 @check_username
16 @never_cache
17 @allowed_methods(["GET", "POST"])
18 @cors_origin()
19 def main(request, username):
20 """ API Endpoint for Device Synchronisation """
21
22 if request.method == "GET":
23 return JsonResponse(get_sync_status(request.user))
24
25 else:
26 try:
27 actions = parse_request_body(request)
28 except ValueError as e:
29 return HttpResponseBadRequest(str(e))
30
31 synclist = actions.get("synchronize", [])
32 stopsync = actions.get("stop-synchronize", [])
33
34 try:
35 update_sync_status(request.user, synclist, stopsync)
36 except ValueError as e:
37 return HttpResponseBadRequest(str(e))
38 except Client.DoesNotExist as e:
39 return HttpResponseNotFound(str(e))
40
41 return JsonResponse(get_sync_status(user))
42
43
44 def get_sync_status(user):
45 """ Returns the current Device Sync status """
46
47 sync_groups = []
48 unsynced = []
49
50 user = UserProxy.objects.from_user(user)
51 for group in user.get_grouped_devices():
52 uids = [device.uid for device in group.devices]
53
54 if group.is_synced:
55 sync_groups.append(uids)
56
57 else:
58 unsynced = uids
59
60 return {"synchronized": sync_groups, "not-synchronized": unsynced}
61
62
63 def update_sync_status(user, synclist, stopsync):
64 """Updates the current Device Sync status
65
66 Synchronisation between devices can be set up and stopped. Devices are
67 identified by their UIDs. Unknown UIDs cause errors, no new devices are
68 created."""
69
70 for devlist in synclist:
71
72 if len(devlist) <= 1:
73 raise ValueError("at least two devices are needed to sync")
74
75 # Setup all devices to sync with the first in the list
76 uid = devlist[0]
77 dev = user.client_set.get(uid=uid)
78
79 for other_uid in devlist[1:]:
80 other = user.get_device_by_uid(other_uid)
81 dev.sync_with(other)
82
83 for uid in stopsync:
84 dev = user.get_device_by_uid(uid)
85 try:
86 dev.stop_sync()
87 except ValueError:
88 # if all devices of a sync-group are un-synced,
89 # the last one will raise a ValueError, because it is no longer
90 # being synced -- we just ignore it
91 pass
92
93 user.save()
94
95 sync_user.delay(user)
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/mygpo/api/advanced/sync.py b/mygpo/api/advanced/sync.py
--- a/mygpo/api/advanced/sync.py
+++ b/mygpo/api/advanced/sync.py
@@ -38,7 +38,7 @@
except Client.DoesNotExist as e:
return HttpResponseNotFound(str(e))
- return JsonResponse(get_sync_status(user))
+ return JsonResponse(get_sync_status(request.user))
def get_sync_status(user):
@@ -77,11 +77,11 @@
dev = user.client_set.get(uid=uid)
for other_uid in devlist[1:]:
- other = user.get_device_by_uid(other_uid)
+ other = user.client_set.get(uid=other_uid)
dev.sync_with(other)
for uid in stopsync:
- dev = user.get_device_by_uid(uid)
+ dev = user.client_set.get(uid=uid)
try:
dev.stop_sync()
except ValueError:
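
For readers skimming the patch above: the 500 comes from two slips in `mygpo/api/advanced/sync.py`. `main()` returned `get_sync_status(user)` although only `request.user` is in scope, and the sync helpers called `user.get_device_by_uid(...)`, which the user object does not provide; the patch switches these to `request.user` and `user.client_set.get(uid=...)`. Below is a minimal, self-contained sketch (plain Python, not mygpo code) of why a call to a missing helper escapes the intended `ValueError`/`DoesNotExist` handling and surfaces as an internal server error:

```python
class FakeUser:
    """Stands in for the user model: it only exposes client_set.get()."""
    class _ClientSet:
        def get(self, uid):
            if uid not in ("my-desktop", "cellphone"):
                raise LookupError(uid)  # plays the role of Client.DoesNotExist
            return uid
    client_set = _ClientSet()

def endpoint(user):
    try:
        # buggy path: get_device_by_uid() is not defined on the user object
        return user.get_device_by_uid("cellphone")
    except (ValueError, LookupError) as exc:
        return "4xx response: %s" % exc  # the only errors the view knows how to map

try:
    endpoint(FakeUser())
except AttributeError as exc:
    # nothing in the view catches this, so the framework renders a 500 page
    print("unhandled:", exc)
```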
| {"golden_diff": "diff --git a/mygpo/api/advanced/sync.py b/mygpo/api/advanced/sync.py\n--- a/mygpo/api/advanced/sync.py\n+++ b/mygpo/api/advanced/sync.py\n@@ -38,7 +38,7 @@\n except Client.DoesNotExist as e:\n return HttpResponseNotFound(str(e))\n \n- return JsonResponse(get_sync_status(user))\n+ return JsonResponse(get_sync_status(request.user))\n \n \n def get_sync_status(user):\n@@ -77,11 +77,11 @@\n dev = user.client_set.get(uid=uid)\n \n for other_uid in devlist[1:]:\n- other = user.get_device_by_uid(other_uid)\n+ other = user.client_set.get(uid=other_uid)\n dev.sync_with(other)\n \n for uid in stopsync:\n- dev = user.get_device_by_uid(uid)\n+ dev = user.client_set.get(uid=uid)\n try:\n dev.stop_sync()\n except ValueError:\n", "issue": "API: Device Synchronization API - Start / Stop Sync returning HTTP status 500\nDuring my work on PR https://github.com/gpodder/mygpo/pull/122 is was testing the Device Synchronization API - Start / Stop Sync (https://gpoddernet.readthedocs.io/en/latest/api/reference/sync.html#post--api-2-sync-devices-(username).json)\r\n\r\nI sent the following request\r\n```json\r\n{\r\n \"synchronize\": [\r\n [\r\n \"my-desktop\", \"cellphone\"\r\n ]\r\n ]\r\n}\r\n```\r\n\r\nand it is returning HTTP 500\r\n```html\r\n<html>\r\n <head>\r\n <title>500 Internal server error (gpodder.net)</title>\r\n <link rel=\"stylesheet\" type=\"text/css\" href=\"/static/css/fail.css\" />\r\n </head>\r\n <body>\r\n <div id=\"c\">\r\n <div id=\"fail\">\r\n <h1>500 - Internal server error.</h1>\r\n <p>\r\n The service is currently overloaded.\r\n Please try again later or contact us.\r\n </p>\r\n </div>\r\n </div>\r\n <img id=\"icon\" src=\"/static/failpodder.png\">\r\n </body>\r\n</html>\r\n```\r\n\r\nas a reference, a previous call to https://gpoddernet.readthedocs.io/en/latest/api/reference/sync.html#get--api-2-sync-devices-(username).json was returning:\r\n```json\r\n{\r\n \"synchronized\": [],\r\n \"not-synchronized\": [\r\n \"cellphone\",\r\n \"my-desktop\"\r\n ]\r\n}\r\n```\r\n\r\nI'm able ot sync this devices on the web ui though.\n", "before_files": [{"content": "from django.http import HttpResponseBadRequest, HttpResponseNotFound\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.cache import never_cache\n\nfrom mygpo.decorators import allowed_methods, cors_origin\nfrom mygpo.utils import parse_request_body\nfrom mygpo.api.basic_auth import require_valid_user, check_username\nfrom mygpo.api.httpresponse import JsonResponse\nfrom mygpo.users.models import Client, UserProxy\nfrom mygpo.users.tasks import sync_user\n\n\n@csrf_exempt\n@require_valid_user\n@check_username\n@never_cache\n@allowed_methods([\"GET\", \"POST\"])\n@cors_origin()\ndef main(request, username):\n \"\"\" API Endpoint for Device Synchronisation \"\"\"\n\n if request.method == \"GET\":\n return JsonResponse(get_sync_status(request.user))\n\n else:\n try:\n actions = parse_request_body(request)\n except ValueError as e:\n return HttpResponseBadRequest(str(e))\n\n synclist = actions.get(\"synchronize\", [])\n stopsync = actions.get(\"stop-synchronize\", [])\n\n try:\n update_sync_status(request.user, synclist, stopsync)\n except ValueError as e:\n return HttpResponseBadRequest(str(e))\n except Client.DoesNotExist as e:\n return HttpResponseNotFound(str(e))\n\n return JsonResponse(get_sync_status(user))\n\n\ndef get_sync_status(user):\n \"\"\" Returns the current Device Sync status \"\"\"\n\n sync_groups = []\n unsynced = []\n\n user = UserProxy.objects.from_user(user)\n for 
group in user.get_grouped_devices():\n uids = [device.uid for device in group.devices]\n\n if group.is_synced:\n sync_groups.append(uids)\n\n else:\n unsynced = uids\n\n return {\"synchronized\": sync_groups, \"not-synchronized\": unsynced}\n\n\ndef update_sync_status(user, synclist, stopsync):\n \"\"\"Updates the current Device Sync status\n\n Synchronisation between devices can be set up and stopped. Devices are\n identified by their UIDs. Unknown UIDs cause errors, no new devices are\n created.\"\"\"\n\n for devlist in synclist:\n\n if len(devlist) <= 1:\n raise ValueError(\"at least two devices are needed to sync\")\n\n # Setup all devices to sync with the first in the list\n uid = devlist[0]\n dev = user.client_set.get(uid=uid)\n\n for other_uid in devlist[1:]:\n other = user.get_device_by_uid(other_uid)\n dev.sync_with(other)\n\n for uid in stopsync:\n dev = user.get_device_by_uid(uid)\n try:\n dev.stop_sync()\n except ValueError:\n # if all devices of a sync-group are un-synced,\n # the last one will raise a ValueError, because it is no longer\n # being synced -- we just ignore it\n pass\n\n user.save()\n\n sync_user.delay(user)\n", "path": "mygpo/api/advanced/sync.py"}], "after_files": [{"content": "from django.http import HttpResponseBadRequest, HttpResponseNotFound\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.cache import never_cache\n\nfrom mygpo.decorators import allowed_methods, cors_origin\nfrom mygpo.utils import parse_request_body\nfrom mygpo.api.basic_auth import require_valid_user, check_username\nfrom mygpo.api.httpresponse import JsonResponse\nfrom mygpo.users.models import Client, UserProxy\nfrom mygpo.users.tasks import sync_user\n\n\n@csrf_exempt\n@require_valid_user\n@check_username\n@never_cache\n@allowed_methods([\"GET\", \"POST\"])\n@cors_origin()\ndef main(request, username):\n \"\"\" API Endpoint for Device Synchronisation \"\"\"\n\n if request.method == \"GET\":\n return JsonResponse(get_sync_status(request.user))\n\n else:\n try:\n actions = parse_request_body(request)\n except ValueError as e:\n return HttpResponseBadRequest(str(e))\n\n synclist = actions.get(\"synchronize\", [])\n stopsync = actions.get(\"stop-synchronize\", [])\n\n try:\n update_sync_status(request.user, synclist, stopsync)\n except ValueError as e:\n return HttpResponseBadRequest(str(e))\n except Client.DoesNotExist as e:\n return HttpResponseNotFound(str(e))\n\n return JsonResponse(get_sync_status(request.user))\n\n\ndef get_sync_status(user):\n \"\"\" Returns the current Device Sync status \"\"\"\n\n sync_groups = []\n unsynced = []\n\n user = UserProxy.objects.from_user(user)\n for group in user.get_grouped_devices():\n uids = [device.uid for device in group.devices]\n\n if group.is_synced:\n sync_groups.append(uids)\n\n else:\n unsynced = uids\n\n return {\"synchronized\": sync_groups, \"not-synchronized\": unsynced}\n\n\ndef update_sync_status(user, synclist, stopsync):\n \"\"\"Updates the current Device Sync status\n\n Synchronisation between devices can be set up and stopped. Devices are\n identified by their UIDs. 
Unknown UIDs cause errors, no new devices are\n created.\"\"\"\n\n for devlist in synclist:\n\n if len(devlist) <= 1:\n raise ValueError(\"at least two devices are needed to sync\")\n\n # Setup all devices to sync with the first in the list\n uid = devlist[0]\n dev = user.client_set.get(uid=uid)\n\n for other_uid in devlist[1:]:\n other = user.client_set.get(uid=other_uid)\n dev.sync_with(other)\n\n for uid in stopsync:\n dev = user.client_set.get(uid=uid)\n try:\n dev.stop_sync()\n except ValueError:\n # if all devices of a sync-group are un-synced,\n # the last one will raise a ValueError, because it is no longer\n # being synced -- we just ignore it\n pass\n\n user.save()\n\n sync_user.delay(user)\n", "path": "mygpo/api/advanced/sync.py"}]} | 1,421 | 211 |
gh_patches_debug_67497 | rasdani/github-patches | git_diff | vllm-project__vllm-2887 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[v0.3.1] Release Tracker
**ETA**: Feb 14-16th
## Major changes
TBD
## PRs to be merged before the release
- [x] #2855
- [x] #2845
- [x] ~~#2514~~
- [x] Ensure memory release when `LLM` class is deleted. #2882
- [x] #2875 #2880
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `vllm/__init__.py`
Content:
```
1 """vLLM: a high-throughput and memory-efficient inference engine for LLMs"""
2
3 from vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs
4 from vllm.engine.async_llm_engine import AsyncLLMEngine
5 from vllm.engine.llm_engine import LLMEngine
6 from vllm.engine.ray_utils import initialize_cluster
7 from vllm.entrypoints.llm import LLM
8 from vllm.outputs import CompletionOutput, RequestOutput
9 from vllm.sampling_params import SamplingParams
10
11 __version__ = "0.3.0"
12
13 __all__ = [
14 "LLM",
15 "SamplingParams",
16 "RequestOutput",
17 "CompletionOutput",
18 "LLMEngine",
19 "EngineArgs",
20 "AsyncLLMEngine",
21 "AsyncEngineArgs",
22 "initialize_cluster",
23 ]
24
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/vllm/__init__.py b/vllm/__init__.py
--- a/vllm/__init__.py
+++ b/vllm/__init__.py
@@ -8,7 +8,7 @@
from vllm.outputs import CompletionOutput, RequestOutput
from vllm.sampling_params import SamplingParams
-__version__ = "0.3.0"
+__version__ = "0.3.1"
__all__ = [
"LLM",
| {"golden_diff": "diff --git a/vllm/__init__.py b/vllm/__init__.py\n--- a/vllm/__init__.py\n+++ b/vllm/__init__.py\n@@ -8,7 +8,7 @@\n from vllm.outputs import CompletionOutput, RequestOutput\n from vllm.sampling_params import SamplingParams\n \n-__version__ = \"0.3.0\"\n+__version__ = \"0.3.1\"\n \n __all__ = [\n \"LLM\",\n", "issue": "[v0.3.1] Release Tracker\n**ETA**: Feb 14-16 th\r\n\r\n## Major changes\r\n\r\nTBD\r\n\r\n## PRs to be merged before the release\r\n\r\n- [x] #2855 \r\n- [x] #2845 \r\n- [x] ~~#2514~~\r\n- [x] Ensure memory release when `LLM` class is deleted. #2882 \r\n- [x] #2875 #2880\n", "before_files": [{"content": "\"\"\"vLLM: a high-throughput and memory-efficient inference engine for LLMs\"\"\"\n\nfrom vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.engine.ray_utils import initialize_cluster\nfrom vllm.entrypoints.llm import LLM\nfrom vllm.outputs import CompletionOutput, RequestOutput\nfrom vllm.sampling_params import SamplingParams\n\n__version__ = \"0.3.0\"\n\n__all__ = [\n \"LLM\",\n \"SamplingParams\",\n \"RequestOutput\",\n \"CompletionOutput\",\n \"LLMEngine\",\n \"EngineArgs\",\n \"AsyncLLMEngine\",\n \"AsyncEngineArgs\",\n \"initialize_cluster\",\n]\n", "path": "vllm/__init__.py"}], "after_files": [{"content": "\"\"\"vLLM: a high-throughput and memory-efficient inference engine for LLMs\"\"\"\n\nfrom vllm.engine.arg_utils import AsyncEngineArgs, EngineArgs\nfrom vllm.engine.async_llm_engine import AsyncLLMEngine\nfrom vllm.engine.llm_engine import LLMEngine\nfrom vllm.engine.ray_utils import initialize_cluster\nfrom vllm.entrypoints.llm import LLM\nfrom vllm.outputs import CompletionOutput, RequestOutput\nfrom vllm.sampling_params import SamplingParams\n\n__version__ = \"0.3.1\"\n\n__all__ = [\n \"LLM\",\n \"SamplingParams\",\n \"RequestOutput\",\n \"CompletionOutput\",\n \"LLMEngine\",\n \"EngineArgs\",\n \"AsyncLLMEngine\",\n \"AsyncEngineArgs\",\n \"initialize_cluster\",\n]\n", "path": "vllm/__init__.py"}]} | 586 | 108 |
gh_patches_debug_6598 | rasdani/github-patches | git_diff | holoviz__panel-2883 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
panel examples gives UnboundLocalError
#### ALL software version info
panel 0.12.4
#### Description of expected behavior and the observed behavior
`$ panel examples` doesn't raise an error
#### Complete, minimal, self-contained example code that reproduces the issue
Was taking a look at https://panel.holoviz.org/#id1
```
panel examples
```
#### Stack traceback and/or browser JavaScript console output
#### Screenshots or screencasts of the bug in action
<img width="846" alt="Screen Shot 2021-11-04 at 9 23 56 PM" src="https://user-images.githubusercontent.com/17162724/140442696-82e6c5c2-4cd6-40f6-821d-47c87f5e1541.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `panel/command/__init__.py`
Content:
```
1 """
2 Commandline interface to Panel
3 """
4 import sys
5 import argparse
6
7 from bokeh.__main__ import main as bokeh_entry_point
8 from bokeh.command.subcommands.serve import Serve as BkServe
9 from bokeh.command.util import die
10 from bokeh.util.string import nice_join
11
12 from .. import __version__
13 from .serve import Serve
14 from .oauth_secret import OAuthSecret
15
16
17 def transform_cmds(argv):
18 """
19 Allows usage with anaconda-project by remapping the argv list provided
20 into arguments accepted by Bokeh 0.12.7 or later.
21 """
22 replacements = {
23 '--anaconda-project-host':'--allow-websocket-origin',
24 '--anaconda-project-port': '--port',
25 '--anaconda-project-address': '--address'
26 }
27 transformed = []
28 skip = False
29 for arg in argv:
30 if skip:
31 skip = False
32 continue
33 if arg in replacements.keys():
34 transformed.append(replacements[arg])
35 elif arg == '--anaconda-project-iframe-hosts':
36 skip = True
37 continue
38 elif arg.startswith('--anaconda-project'):
39 continue
40 else:
41 transformed.append(arg)
42 return transformed
43
44
45 def main(args=None):
46 """Merges commands offered by pyct and bokeh and provides help for both"""
47 from bokeh.command.subcommands import all as bokeh_commands
48 bokeh_commands = bokeh_commands + [OAuthSecret]
49
50 try:
51 import pyct.cmd
52 pyct_commands = ['copy-examples', 'examples']
53 except Exception:
54 pass
55
56 parser = argparse.ArgumentParser(
57 prog="panel", epilog="See '<command> --help' to read about a specific subcommand."
58 )
59
60 parser.add_argument('-v', '--version', action='version', version=__version__)
61
62 subs = parser.add_subparsers(help="Sub-commands")
63
64 for cmd in pyct_commands:
65 cmd = cmd.replace('-', '_')
66 fn = getattr(pyct.cmd, cmd)
67 subs.add_parser(cmd, help=fn.__doc__)
68
69 for cls in bokeh_commands:
70 if cls is BkServe:
71 subparser = subs.add_parser(Serve.name, help=Serve.help)
72 subcommand = Serve(parser=subparser)
73 subparser.set_defaults(invoke=subcommand.invoke)
74 else:
75 subs.add_parser(cls.name, help=cls.help)
76
77 if len(sys.argv) == 1:
78 all_commands = sorted([c.name for c in bokeh_commands] + pyct_commands)
79 die("ERROR: Must specify subcommand, one of: %s" % nice_join(all_commands))
80
81 if sys.argv[1] in ('--help', '-h'):
82 args = parser.parse_args(sys.argv[1:])
83 args.invoke(args)
84 sys.exit()
85
86 if len(sys.argv) > 1 and any(sys.argv[1] == c.name for c in bokeh_commands):
87 sys.argv = transform_cmds(sys.argv)
88 if sys.argv[1] == 'serve':
89 args = parser.parse_args(sys.argv[1:])
90 try:
91 ret = args.invoke(args)
92 except Exception as e:
93 die("ERROR: " + str(e))
94 elif sys.argv[1] == 'oauth-secret':
95 ret = OAuthSecret(parser).invoke(args)
96 else:
97 ret = bokeh_entry_point()
98 elif sys.argv[1] in pyct_commands:
99 try:
100 import pyct.cmd
101 except ImportError:
102 print("install pyct to enable this command (e.g. `conda install -c pyviz pyct` or `pip install pyct[cmd]`)")
103 sys.exit(1)
104 pyct.cmd.substitute_main('panel', cmds=pyct_commands, args=args)
105 else:
106 parser.parse_args(sys.argv[1:])
107 sys.exit(1)
108
109 if ret is False:
110 sys.exit(1)
111 elif ret is not True and isinstance(ret, int) and ret != 0:
112 sys.exit(ret)
113
114
115
116 if __name__ == "__main__":
117 main()
118
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/panel/command/__init__.py b/panel/command/__init__.py
--- a/panel/command/__init__.py
+++ b/panel/command/__init__.py
@@ -102,6 +102,7 @@
print("install pyct to enable this command (e.g. `conda install -c pyviz pyct` or `pip install pyct[cmd]`)")
sys.exit(1)
pyct.cmd.substitute_main('panel', cmds=pyct_commands, args=args)
+ sys.exit()
else:
parser.parse_args(sys.argv[1:])
sys.exit(1)
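
The UnboundLocalError behind the screenshot is a plain control-flow gap: `ret` is assigned only on the bokeh-command branches of `main()`, yet the final `if ret is False:` check runs unconditionally, so the pyct `examples` branch falls through to a name that was never bound; the one-line `sys.exit()` in the patch ends the pyct branch before that check. A compact sketch of the same shape (illustrative only, not Panel's actual dispatch code):

```python
import sys

def dispatch(command):
    if command == "serve":
        ret = True                     # bokeh-style subcommands bind ret
    elif command == "examples":
        print("copying examples ...")  # pyct branch: ret is never bound
        # fix: return or sys.exit() here, before ret is read below
    if ret is False:                   # UnboundLocalError for "examples"
        sys.exit(1)

try:
    dispatch("examples")
except UnboundLocalError as exc:
    print("reproduced:", exc)
```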
| {"golden_diff": "diff --git a/panel/command/__init__.py b/panel/command/__init__.py\n--- a/panel/command/__init__.py\n+++ b/panel/command/__init__.py\n@@ -102,6 +102,7 @@\n print(\"install pyct to enable this command (e.g. `conda install -c pyviz pyct` or `pip install pyct[cmd]`)\")\n sys.exit(1)\n pyct.cmd.substitute_main('panel', cmds=pyct_commands, args=args)\n+ sys.exit()\n else:\n parser.parse_args(sys.argv[1:])\n sys.exit(1)\n", "issue": "panel examples gives UnboundLocalError\n#### ALL software version info\r\npanel 0.12.4\r\n\r\n#### Description of expected behavior and the observed behavior\r\n`$ panel examples` doesn't raise an error\r\n\r\n#### Complete, minimal, self-contained example code that reproduces the issue\r\n\r\nWas taking a look at https://panel.holoviz.org/#id1\r\n\r\n```\r\npanel examples\r\n```\r\n\r\n#### Stack traceback and/or browser JavaScript console output\r\n\r\n#### Screenshots or screencasts of the bug in action\r\n\r\n<img width=\"846\" alt=\"Screen Shot 2021-11-04 at 9 23 56 PM\" src=\"https://user-images.githubusercontent.com/17162724/140442696-82e6c5c2-4cd6-40f6-821d-47c87f5e1541.png\">\r\n\r\n\n", "before_files": [{"content": "\"\"\"\nCommandline interface to Panel\n\"\"\"\nimport sys\nimport argparse\n\nfrom bokeh.__main__ import main as bokeh_entry_point\nfrom bokeh.command.subcommands.serve import Serve as BkServe\nfrom bokeh.command.util import die\nfrom bokeh.util.string import nice_join\n\nfrom .. import __version__\nfrom .serve import Serve\nfrom .oauth_secret import OAuthSecret\n\n\ndef transform_cmds(argv):\n \"\"\"\n Allows usage with anaconda-project by remapping the argv list provided\n into arguments accepted by Bokeh 0.12.7 or later.\n \"\"\"\n replacements = {\n '--anaconda-project-host':'--allow-websocket-origin',\n '--anaconda-project-port': '--port',\n '--anaconda-project-address': '--address'\n }\n transformed = []\n skip = False\n for arg in argv:\n if skip:\n skip = False\n continue\n if arg in replacements.keys():\n transformed.append(replacements[arg])\n elif arg == '--anaconda-project-iframe-hosts':\n skip = True\n continue\n elif arg.startswith('--anaconda-project'):\n continue\n else:\n transformed.append(arg)\n return transformed\n\n\ndef main(args=None):\n \"\"\"Merges commands offered by pyct and bokeh and provides help for both\"\"\"\n from bokeh.command.subcommands import all as bokeh_commands\n bokeh_commands = bokeh_commands + [OAuthSecret]\n\n try:\n import pyct.cmd\n pyct_commands = ['copy-examples', 'examples']\n except Exception:\n pass\n\n parser = argparse.ArgumentParser(\n prog=\"panel\", epilog=\"See '<command> --help' to read about a specific subcommand.\"\n )\n\n parser.add_argument('-v', '--version', action='version', version=__version__)\n\n subs = parser.add_subparsers(help=\"Sub-commands\")\n\n for cmd in pyct_commands:\n cmd = cmd.replace('-', '_')\n fn = getattr(pyct.cmd, cmd)\n subs.add_parser(cmd, help=fn.__doc__)\n\n for cls in bokeh_commands:\n if cls is BkServe:\n subparser = subs.add_parser(Serve.name, help=Serve.help)\n subcommand = Serve(parser=subparser)\n subparser.set_defaults(invoke=subcommand.invoke)\n else:\n subs.add_parser(cls.name, help=cls.help)\n\n if len(sys.argv) == 1:\n all_commands = sorted([c.name for c in bokeh_commands] + pyct_commands)\n die(\"ERROR: Must specify subcommand, one of: %s\" % nice_join(all_commands))\n\n if sys.argv[1] in ('--help', '-h'):\n args = parser.parse_args(sys.argv[1:])\n args.invoke(args)\n sys.exit()\n\n if len(sys.argv) > 1 and any(sys.argv[1] == 
c.name for c in bokeh_commands):\n sys.argv = transform_cmds(sys.argv)\n if sys.argv[1] == 'serve':\n args = parser.parse_args(sys.argv[1:])\n try:\n ret = args.invoke(args)\n except Exception as e:\n die(\"ERROR: \" + str(e))\n elif sys.argv[1] == 'oauth-secret':\n ret = OAuthSecret(parser).invoke(args)\n else:\n ret = bokeh_entry_point()\n elif sys.argv[1] in pyct_commands:\n try:\n import pyct.cmd\n except ImportError:\n print(\"install pyct to enable this command (e.g. `conda install -c pyviz pyct` or `pip install pyct[cmd]`)\")\n sys.exit(1)\n pyct.cmd.substitute_main('panel', cmds=pyct_commands, args=args)\n else:\n parser.parse_args(sys.argv[1:])\n sys.exit(1)\n\n if ret is False:\n sys.exit(1)\n elif ret is not True and isinstance(ret, int) and ret != 0:\n sys.exit(ret)\n\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "panel/command/__init__.py"}], "after_files": [{"content": "\"\"\"\nCommandline interface to Panel\n\"\"\"\nimport sys\nimport argparse\n\nfrom bokeh.__main__ import main as bokeh_entry_point\nfrom bokeh.command.subcommands.serve import Serve as BkServe\nfrom bokeh.command.util import die\nfrom bokeh.util.string import nice_join\n\nfrom .. import __version__\nfrom .serve import Serve\nfrom .oauth_secret import OAuthSecret\n\n\ndef transform_cmds(argv):\n \"\"\"\n Allows usage with anaconda-project by remapping the argv list provided\n into arguments accepted by Bokeh 0.12.7 or later.\n \"\"\"\n replacements = {\n '--anaconda-project-host':'--allow-websocket-origin',\n '--anaconda-project-port': '--port',\n '--anaconda-project-address': '--address'\n }\n transformed = []\n skip = False\n for arg in argv:\n if skip:\n skip = False\n continue\n if arg in replacements.keys():\n transformed.append(replacements[arg])\n elif arg == '--anaconda-project-iframe-hosts':\n skip = True\n continue\n elif arg.startswith('--anaconda-project'):\n continue\n else:\n transformed.append(arg)\n return transformed\n\n\ndef main(args=None):\n \"\"\"Merges commands offered by pyct and bokeh and provides help for both\"\"\"\n from bokeh.command.subcommands import all as bokeh_commands\n bokeh_commands = bokeh_commands + [OAuthSecret]\n\n try:\n import pyct.cmd\n pyct_commands = ['copy-examples', 'examples']\n except Exception:\n pass\n\n parser = argparse.ArgumentParser(\n prog=\"panel\", epilog=\"See '<command> --help' to read about a specific subcommand.\"\n )\n\n parser.add_argument('-v', '--version', action='version', version=__version__)\n\n subs = parser.add_subparsers(help=\"Sub-commands\")\n\n for cmd in pyct_commands:\n cmd = cmd.replace('-', '_')\n fn = getattr(pyct.cmd, cmd)\n subs.add_parser(cmd, help=fn.__doc__)\n\n for cls in bokeh_commands:\n if cls is BkServe:\n subparser = subs.add_parser(Serve.name, help=Serve.help)\n subcommand = Serve(parser=subparser)\n subparser.set_defaults(invoke=subcommand.invoke)\n else:\n subs.add_parser(cls.name, help=cls.help)\n\n if len(sys.argv) == 1:\n all_commands = sorted([c.name for c in bokeh_commands] + pyct_commands)\n die(\"ERROR: Must specify subcommand, one of: %s\" % nice_join(all_commands))\n\n if sys.argv[1] in ('--help', '-h'):\n args = parser.parse_args(sys.argv[1:])\n args.invoke(args)\n sys.exit()\n\n if len(sys.argv) > 1 and any(sys.argv[1] == c.name for c in bokeh_commands):\n sys.argv = transform_cmds(sys.argv)\n if sys.argv[1] == 'serve':\n args = parser.parse_args(sys.argv[1:])\n try:\n ret = args.invoke(args)\n except Exception as e:\n die(\"ERROR: \" + str(e))\n elif sys.argv[1] == 'oauth-secret':\n ret = 
OAuthSecret(parser).invoke(args)\n else:\n ret = bokeh_entry_point()\n elif sys.argv[1] in pyct_commands:\n try:\n import pyct.cmd\n except ImportError:\n print(\"install pyct to enable this command (e.g. `conda install -c pyviz pyct` or `pip install pyct[cmd]`)\")\n sys.exit(1)\n pyct.cmd.substitute_main('panel', cmds=pyct_commands, args=args)\n sys.exit()\n else:\n parser.parse_args(sys.argv[1:])\n sys.exit(1)\n\n if ret is False:\n sys.exit(1)\n elif ret is not True and isinstance(ret, int) and ret != 0:\n sys.exit(ret)\n\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "panel/command/__init__.py"}]} | 1,576 | 137 |
gh_patches_debug_26307 | rasdani/github-patches | git_diff | web2py__web2py-2419 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to login when using redis for storing sessions
Basically a new session is created on each page load, so the login is never established.
Issue discussed at: https://groups.google.com/forum/?utm_medium=email&utm_source=footer#!msg/web2py/6Ig5YVgvIsI/HpueAUELBgAJ
Confirmed with web2py versions from 2.18.5 up to 2.20.4, and with Python versions 3.6 and 3.8.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gluon/contrib/redis_session.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 """
4 Developed by [email protected]
5 License MIT/BSD/GPL
6
7 Redis-backed sessions
8 """
9
10 import logging
11 from threading import Lock
12 from gluon import current
13 from gluon.storage import Storage
14 from gluon.contrib.redis_utils import acquire_lock, release_lock
15 from gluon.contrib.redis_utils import register_release_lock
16 from gluon._compat import to_native
17 from datetime import datetime
18
19 logger = logging.getLogger("web2py.session.redis")
20
21 locker = Lock()
22
23
24 def RedisSession(redis_conn, session_expiry=False, with_lock=False, db=None):
25 """
26 Usage example: put in models::
27
28 from gluon.contrib.redis_utils import RConn
29 rconn = RConn()
30 from gluon.contrib.redis_session import RedisSession
31 sessiondb = RedisSession(redis_conn=rconn, with_lock=True, session_expiry=False)
32 session.connect(request, response, db = sessiondb)
33
34 Args:
35 redis_conn: a redis-like connection object
36 with_lock: prevent concurrent modifications to the same session
37 session_expiry: delete automatically sessions after n seconds
38 (still need to run sessions2trash.py every 1M sessions
39 or so)
40
41 Simple slip-in storage for session
42 """
43
44 locker.acquire()
45 try:
46 instance_name = 'redis_instance_' + current.request.application
47 if not hasattr(RedisSession, instance_name):
48 setattr(RedisSession, instance_name,
49 RedisClient(redis_conn, session_expiry=session_expiry, with_lock=with_lock))
50 return getattr(RedisSession, instance_name)
51 finally:
52 locker.release()
53
54
55 class RedisClient(object):
56
57 def __init__(self, redis_conn, session_expiry=False, with_lock=False):
58 self.r_server = redis_conn
59 self._release_script = register_release_lock(self.r_server)
60 self.tablename = None
61 self.session_expiry = session_expiry
62 self.with_lock = with_lock
63
64 def get(self, what, default):
65 return self.tablename
66
67 def Field(self, fieldname, type='string', length=None, default=None,
68 required=False, requires=None):
69 return fieldname, type
70
71 def define_table(self, tablename, *fields, **args):
72 if not self.tablename:
73 self.tablename = MockTable(
74 self, self.r_server, tablename, self.session_expiry,
75 with_lock=self.with_lock, fields=fields)
76 return self.tablename
77
78 def __getitem__(self, key):
79 return self.tablename
80
81 def __call__(self, where=''):
82 q = self.tablename.query
83 return q
84
85 def commit(self):
86 # this is only called by session2trash.py
87 pass
88
89 def convert_dict_string(self, dict_string):
90 fields = self.tablename.fields
91 typed_dict = dict()
92 converters = {
93 'boolean': lambda x: 1 if x.decode() == '1' else 0,
94 'blob': lambda x: x,
95 }
96 for field, ftype in fields:
97 if field not in dict_string:
98 continue
99 if ftype in converters:
100 typed_dict[field] = converters[ftype](dict_string[field])
101 else:
102 typed_dict[field] = dict_string[field].decode()
103 return typed_dict
104
105
106 class MockTable(object):
107
108 def __init__(self, db, r_server, tablename, session_expiry, with_lock=False, fields=None):
109 # here self.db is the RedisClient instance
110 self.db = db
111 self.tablename = tablename
112 # set the namespace for sessions of this app
113 self.keyprefix = 'w2p:sess:%s' % tablename.replace('web2py_session_', '')
114 # fast auto-increment id (needed for session handling)
115 self.serial = "%s:serial" % self.keyprefix
116 # index of all the session keys of this app
117 self.id_idx = "%s:id_idx" % self.keyprefix
118 # remember the session_expiry setting
119 self.session_expiry = session_expiry
120 self.with_lock = with_lock
121 self.fields = fields if fields is not None else []
122
123 def __call__(self, record_id, unique_key=None):
124 # Support DAL shortcut query: table(record_id)
125
126 # This will call the __getattr__ below
127 # returning a MockQuery
128 q = self.id
129
130 # Instructs MockQuery, to behave as db(table.id == record_id)
131 q.op = 'eq'
132 q.value = record_id
133 q.unique_key = unique_key
134
135 row = q.select()
136 return row[0] if row else Storage()
137
138 def __getattr__(self, key):
139 if key == 'id':
140 # return a fake query. We need to query it just by id for normal operations
141 self.query = MockQuery(
142 field='id', db=self.db,
143 prefix=self.keyprefix, session_expiry=self.session_expiry,
144 with_lock=self.with_lock, unique_key=self.unique_key
145 )
146 return self.query
147 elif key == '_db':
148 # needed because of the calls in sessions2trash.py and globals.py
149 return self.db
150
151 def insert(self, **kwargs):
152 # usually kwargs would be a Storage with several keys:
153 # 'locked', 'client_ip','created_datetime','modified_datetime'
154 # 'unique_key', 'session_data'
155 # retrieve a new key
156 newid = str(self.db.r_server.incr(self.serial))
157 key = self.keyprefix + ':' + newid
158 if self.with_lock:
159 key_lock = key + ':lock'
160 acquire_lock(self.db.r_server, key_lock, newid)
161 with self.db.r_server.pipeline() as pipe:
162 # add it to the index
163 pipe.sadd(self.id_idx, key)
164 # set a hash key with the Storage
165 pipe.hmset(key, kwargs)
166 if self.session_expiry:
167 pipe.expire(key, self.session_expiry)
168 pipe.execute()
169 if self.with_lock:
170 release_lock(self.db, key_lock, newid)
171 return newid
172
173
174 class MockQuery(object):
175 """a fake Query object that supports querying by id
176 and listing all keys. No other operation is supported
177 """
178 def __init__(self, field=None, db=None, prefix=None, session_expiry=False,
179 with_lock=False, unique_key=None):
180 self.field = field
181 self.value = None
182 self.db = db
183 self.keyprefix = prefix
184 self.op = None
185 self.session_expiry = session_expiry
186 self.with_lock = with_lock
187 self.unique_key = unique_key
188
189 def __eq__(self, value, op='eq'):
190 self.value = value
191 self.op = op
192
193 def __ge__(self, value, op='ge'):
194 self.value = value
195 self.op = op
196
197 def __gt__(self, value, op='gt'):
198 self.value = value
199 self.op = op
200
201 def select(self):
202 if self.op == 'eq' and self.field == 'id' and self.value:
203 # means that someone wants to retrieve the key self.value
204 key = self.keyprefix + ':' + str(self.value)
205 if self.with_lock:
206 acquire_lock(self.db.r_server, key + ':lock', self.value, 2)
207 rtn = {to_native(k): v for k, v in self.db.r_server.hgetall(key).items()}
208 if rtn:
209 if self.unique_key:
210 # make sure the id and unique_key are correct
211 if rtn['unique_key'] == to_native(self.unique_key):
212 rtn['update_record'] = self.update # update record support
213 else:
214 rtn = None
215 return [Storage(self.db.convert_dict_string(rtn))] if rtn else []
216 elif self.op in ('ge', 'gt') and self.field == 'id' and self.value == 0:
217 # means that someone wants the complete list
218 rtn = []
219 id_idx = "%s:id_idx" % self.keyprefix
220 # find all session keys of this app
221 allkeys = self.db.r_server.smembers(id_idx)
222 for sess in allkeys:
223 val = self.db.r_server.hgetall(sess)
224 if not val:
225 if self.session_expiry:
226 # clean up the idx, because the key expired
227 self.db.r_server.srem(id_idx, sess)
228 continue
229 val = Storage(self.db.convert_dict_string(val))
230 # add a delete_record method (necessary for sessions2trash.py)
231 val.delete_record = RecordDeleter(
232 self.db, sess, self.keyprefix)
233 rtn.append(val)
234 return rtn
235 else:
236 raise Exception("Operation not supported")
237
238 def update(self, **kwargs):
239 # means that the session has been found and needs an update
240 if self.op == 'eq' and self.field == 'id' and self.value:
241 key = self.keyprefix + ':' + str(self.value)
242 if not self.db.r_server.exists(key):
243 return None
244 with self.db.r_server.pipeline() as pipe:
245 pipe.hmset(key, kwargs)
246 if self.session_expiry:
247 pipe.expire(key, self.session_expiry)
248 rtn = pipe.execute()[0]
249 if self.with_lock:
250 release_lock(self.db, key + ':lock', self.value)
251 return rtn
252
253 def delete(self, **kwargs):
254 # means that we want this session to be deleted
255 if self.op == 'eq' and self.field == 'id' and self.value:
256 id_idx = "%s:id_idx" % self.keyprefix
257 key = self.keyprefix + ':' + str(self.value)
258 with self.db.r_server.pipeline() as pipe:
259 pipe.delete(key)
260 pipe.srem(id_idx, key)
261 rtn = pipe.execute()
262 return rtn[1]
263
264
265 class RecordDeleter(object):
266 """Dumb record deleter to support sessions2trash.py"""
267
268 def __init__(self, db, key, keyprefix):
269 self.db, self.key, self.keyprefix = db, key, keyprefix
270
271 def __call__(self):
272 id_idx = "%s:id_idx" % self.keyprefix
273 # remove from the index
274 self.db.r_server.srem(id_idx, self.key)
275 # remove the key itself
276 self.db.r_server.delete(self.key)
277
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/gluon/contrib/redis_session.py b/gluon/contrib/redis_session.py
--- a/gluon/contrib/redis_session.py
+++ b/gluon/contrib/redis_session.py
@@ -13,7 +13,7 @@
from gluon.storage import Storage
from gluon.contrib.redis_utils import acquire_lock, release_lock
from gluon.contrib.redis_utils import register_release_lock
-from gluon._compat import to_native
+from gluon._compat import to_native, to_bytes
from datetime import datetime
logger = logging.getLogger("web2py.session.redis")
@@ -131,7 +131,6 @@
q.op = 'eq'
q.value = record_id
q.unique_key = unique_key
-
row = q.select()
return row[0] if row else Storage()
@@ -208,7 +207,7 @@
if rtn:
if self.unique_key:
# make sure the id and unique_key are correct
- if rtn['unique_key'] == to_native(self.unique_key):
+ if rtn['unique_key'] == to_bytes(self.unique_key):
rtn['update_record'] = self.update # update record support
else:
rtn = None
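
The single changed comparison explains the symptom: under Python 3, redis-py returns the stored hash values as bytes, so `rtn['unique_key']` is a bytes object, while `to_native(self.unique_key)` is a str; bytes and str never compare equal, the session was therefore treated as stale on every request and a new one was issued, which is why the login never stuck. A two-line illustration of the mismatch (plain Python, not web2py code):

```python
stored_unique_key = b"abc123"   # what r_server.hgetall() hands back on Python 3
cookie_unique_key = "abc123"    # what the session cookie / to_native() yields

print(stored_unique_key == cookie_unique_key)                  # False -> session discarded
print(stored_unique_key == cookie_unique_key.encode("utf-8"))  # True  -> the to_bytes() fix
```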
| {"golden_diff": "diff --git a/gluon/contrib/redis_session.py b/gluon/contrib/redis_session.py\n--- a/gluon/contrib/redis_session.py\n+++ b/gluon/contrib/redis_session.py\n@@ -13,7 +13,7 @@\n from gluon.storage import Storage\n from gluon.contrib.redis_utils import acquire_lock, release_lock\n from gluon.contrib.redis_utils import register_release_lock\n-from gluon._compat import to_native\n+from gluon._compat import to_native, to_bytes\n from datetime import datetime\n \n logger = logging.getLogger(\"web2py.session.redis\")\n@@ -131,7 +131,6 @@\n q.op = 'eq'\n q.value = record_id\n q.unique_key = unique_key\n-\n row = q.select()\n return row[0] if row else Storage()\n \n@@ -208,7 +207,7 @@\n if rtn:\n if self.unique_key:\n # make sure the id and unique_key are correct\n- if rtn['unique_key'] == to_native(self.unique_key):\n+ if rtn['unique_key'] == to_bytes(self.unique_key):\n rtn['update_record'] = self.update # update record support\n else:\n rtn = None\n", "issue": "Unable to login when using redis for storing sessions\nBasically a new session is created on each page load so the login is never established.\r\n\r\nIssue discussed at: https://groups.google.com/forum/?utm_medium=email&utm_source=footer#!msg/web2py/6Ig5YVgvIsI/HpueAUELBgAJ\r\n\r\nConfirmed with web2py versions from 2.18.5 up to 2.20.4. Python versions 3.6 and 3.8\r\n\r\n\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nDeveloped by [email protected]\nLicense MIT/BSD/GPL\n\nRedis-backed sessions\n\"\"\"\n\nimport logging\nfrom threading import Lock\nfrom gluon import current\nfrom gluon.storage import Storage\nfrom gluon.contrib.redis_utils import acquire_lock, release_lock\nfrom gluon.contrib.redis_utils import register_release_lock\nfrom gluon._compat import to_native\nfrom datetime import datetime\n\nlogger = logging.getLogger(\"web2py.session.redis\")\n\nlocker = Lock()\n\n\ndef RedisSession(redis_conn, session_expiry=False, with_lock=False, db=None):\n \"\"\"\n Usage example: put in models::\n\n from gluon.contrib.redis_utils import RConn\n rconn = RConn()\n from gluon.contrib.redis_session import RedisSession\n sessiondb = RedisSession(redis_conn=rconn, with_lock=True, session_expiry=False)\n session.connect(request, response, db = sessiondb)\n\n Args:\n redis_conn: a redis-like connection object\n with_lock: prevent concurrent modifications to the same session\n session_expiry: delete automatically sessions after n seconds\n (still need to run sessions2trash.py every 1M sessions\n or so)\n\n Simple slip-in storage for session\n \"\"\"\n\n locker.acquire()\n try:\n instance_name = 'redis_instance_' + current.request.application\n if not hasattr(RedisSession, instance_name):\n setattr(RedisSession, instance_name,\n RedisClient(redis_conn, session_expiry=session_expiry, with_lock=with_lock))\n return getattr(RedisSession, instance_name)\n finally:\n locker.release()\n\n\nclass RedisClient(object):\n\n def __init__(self, redis_conn, session_expiry=False, with_lock=False):\n self.r_server = redis_conn\n self._release_script = register_release_lock(self.r_server)\n self.tablename = None\n self.session_expiry = session_expiry\n self.with_lock = with_lock\n\n def get(self, what, default):\n return self.tablename\n\n def Field(self, fieldname, type='string', length=None, default=None,\n required=False, requires=None):\n return fieldname, type\n\n def define_table(self, tablename, *fields, **args):\n if not self.tablename:\n self.tablename = MockTable(\n self, self.r_server, 
tablename, self.session_expiry,\n with_lock=self.with_lock, fields=fields)\n return self.tablename\n\n def __getitem__(self, key):\n return self.tablename\n\n def __call__(self, where=''):\n q = self.tablename.query\n return q\n\n def commit(self):\n # this is only called by session2trash.py\n pass\n\n def convert_dict_string(self, dict_string):\n fields = self.tablename.fields\n typed_dict = dict()\n converters = {\n 'boolean': lambda x: 1 if x.decode() == '1' else 0,\n 'blob': lambda x: x,\n }\n for field, ftype in fields:\n if field not in dict_string:\n continue\n if ftype in converters:\n typed_dict[field] = converters[ftype](dict_string[field])\n else:\n typed_dict[field] = dict_string[field].decode()\n return typed_dict\n\n\nclass MockTable(object):\n\n def __init__(self, db, r_server, tablename, session_expiry, with_lock=False, fields=None):\n # here self.db is the RedisClient instance\n self.db = db\n self.tablename = tablename\n # set the namespace for sessions of this app\n self.keyprefix = 'w2p:sess:%s' % tablename.replace('web2py_session_', '')\n # fast auto-increment id (needed for session handling)\n self.serial = \"%s:serial\" % self.keyprefix\n # index of all the session keys of this app\n self.id_idx = \"%s:id_idx\" % self.keyprefix\n # remember the session_expiry setting\n self.session_expiry = session_expiry\n self.with_lock = with_lock\n self.fields = fields if fields is not None else []\n\n def __call__(self, record_id, unique_key=None):\n # Support DAL shortcut query: table(record_id)\n\n # This will call the __getattr__ below\n # returning a MockQuery\n q = self.id\n\n # Instructs MockQuery, to behave as db(table.id == record_id)\n q.op = 'eq'\n q.value = record_id\n q.unique_key = unique_key\n\n row = q.select()\n return row[0] if row else Storage()\n\n def __getattr__(self, key):\n if key == 'id':\n # return a fake query. We need to query it just by id for normal operations\n self.query = MockQuery(\n field='id', db=self.db,\n prefix=self.keyprefix, session_expiry=self.session_expiry,\n with_lock=self.with_lock, unique_key=self.unique_key\n )\n return self.query\n elif key == '_db':\n # needed because of the calls in sessions2trash.py and globals.py\n return self.db\n\n def insert(self, **kwargs):\n # usually kwargs would be a Storage with several keys:\n # 'locked', 'client_ip','created_datetime','modified_datetime'\n # 'unique_key', 'session_data'\n # retrieve a new key\n newid = str(self.db.r_server.incr(self.serial))\n key = self.keyprefix + ':' + newid\n if self.with_lock:\n key_lock = key + ':lock'\n acquire_lock(self.db.r_server, key_lock, newid)\n with self.db.r_server.pipeline() as pipe:\n # add it to the index\n pipe.sadd(self.id_idx, key)\n # set a hash key with the Storage\n pipe.hmset(key, kwargs)\n if self.session_expiry:\n pipe.expire(key, self.session_expiry)\n pipe.execute()\n if self.with_lock:\n release_lock(self.db, key_lock, newid)\n return newid\n\n\nclass MockQuery(object):\n \"\"\"a fake Query object that supports querying by id\n and listing all keys. 
No other operation is supported\n \"\"\"\n def __init__(self, field=None, db=None, prefix=None, session_expiry=False,\n with_lock=False, unique_key=None):\n self.field = field\n self.value = None\n self.db = db\n self.keyprefix = prefix\n self.op = None\n self.session_expiry = session_expiry\n self.with_lock = with_lock\n self.unique_key = unique_key\n\n def __eq__(self, value, op='eq'):\n self.value = value\n self.op = op\n\n def __ge__(self, value, op='ge'):\n self.value = value\n self.op = op\n\n def __gt__(self, value, op='gt'):\n self.value = value\n self.op = op\n\n def select(self):\n if self.op == 'eq' and self.field == 'id' and self.value:\n # means that someone wants to retrieve the key self.value\n key = self.keyprefix + ':' + str(self.value)\n if self.with_lock:\n acquire_lock(self.db.r_server, key + ':lock', self.value, 2)\n rtn = {to_native(k): v for k, v in self.db.r_server.hgetall(key).items()}\n if rtn:\n if self.unique_key:\n # make sure the id and unique_key are correct\n if rtn['unique_key'] == to_native(self.unique_key):\n rtn['update_record'] = self.update # update record support\n else:\n rtn = None\n return [Storage(self.db.convert_dict_string(rtn))] if rtn else []\n elif self.op in ('ge', 'gt') and self.field == 'id' and self.value == 0:\n # means that someone wants the complete list\n rtn = []\n id_idx = \"%s:id_idx\" % self.keyprefix\n # find all session keys of this app\n allkeys = self.db.r_server.smembers(id_idx)\n for sess in allkeys:\n val = self.db.r_server.hgetall(sess)\n if not val:\n if self.session_expiry:\n # clean up the idx, because the key expired\n self.db.r_server.srem(id_idx, sess)\n continue\n val = Storage(self.db.convert_dict_string(val))\n # add a delete_record method (necessary for sessions2trash.py)\n val.delete_record = RecordDeleter(\n self.db, sess, self.keyprefix)\n rtn.append(val)\n return rtn\n else:\n raise Exception(\"Operation not supported\")\n\n def update(self, **kwargs):\n # means that the session has been found and needs an update\n if self.op == 'eq' and self.field == 'id' and self.value:\n key = self.keyprefix + ':' + str(self.value)\n if not self.db.r_server.exists(key):\n return None\n with self.db.r_server.pipeline() as pipe:\n pipe.hmset(key, kwargs)\n if self.session_expiry:\n pipe.expire(key, self.session_expiry)\n rtn = pipe.execute()[0]\n if self.with_lock:\n release_lock(self.db, key + ':lock', self.value)\n return rtn\n\n def delete(self, **kwargs):\n # means that we want this session to be deleted\n if self.op == 'eq' and self.field == 'id' and self.value:\n id_idx = \"%s:id_idx\" % self.keyprefix\n key = self.keyprefix + ':' + str(self.value)\n with self.db.r_server.pipeline() as pipe:\n pipe.delete(key)\n pipe.srem(id_idx, key)\n rtn = pipe.execute()\n return rtn[1]\n\n\nclass RecordDeleter(object):\n \"\"\"Dumb record deleter to support sessions2trash.py\"\"\"\n\n def __init__(self, db, key, keyprefix):\n self.db, self.key, self.keyprefix = db, key, keyprefix\n\n def __call__(self):\n id_idx = \"%s:id_idx\" % self.keyprefix\n # remove from the index\n self.db.r_server.srem(id_idx, self.key)\n # remove the key itself\n self.db.r_server.delete(self.key)\n", "path": "gluon/contrib/redis_session.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nDeveloped by [email protected]\nLicense MIT/BSD/GPL\n\nRedis-backed sessions\n\"\"\"\n\nimport logging\nfrom threading import Lock\nfrom gluon import current\nfrom gluon.storage import Storage\nfrom gluon.contrib.redis_utils import 
acquire_lock, release_lock\nfrom gluon.contrib.redis_utils import register_release_lock\nfrom gluon._compat import to_native, to_bytes\nfrom datetime import datetime\n\nlogger = logging.getLogger(\"web2py.session.redis\")\n\nlocker = Lock()\n\n\ndef RedisSession(redis_conn, session_expiry=False, with_lock=False, db=None):\n \"\"\"\n Usage example: put in models::\n\n from gluon.contrib.redis_utils import RConn\n rconn = RConn()\n from gluon.contrib.redis_session import RedisSession\n sessiondb = RedisSession(redis_conn=rconn, with_lock=True, session_expiry=False)\n session.connect(request, response, db = sessiondb)\n\n Args:\n redis_conn: a redis-like connection object\n with_lock: prevent concurrent modifications to the same session\n session_expiry: delete automatically sessions after n seconds\n (still need to run sessions2trash.py every 1M sessions\n or so)\n\n Simple slip-in storage for session\n \"\"\"\n\n locker.acquire()\n try:\n instance_name = 'redis_instance_' + current.request.application\n if not hasattr(RedisSession, instance_name):\n setattr(RedisSession, instance_name,\n RedisClient(redis_conn, session_expiry=session_expiry, with_lock=with_lock))\n return getattr(RedisSession, instance_name)\n finally:\n locker.release()\n\n\nclass RedisClient(object):\n\n def __init__(self, redis_conn, session_expiry=False, with_lock=False):\n self.r_server = redis_conn\n self._release_script = register_release_lock(self.r_server)\n self.tablename = None\n self.session_expiry = session_expiry\n self.with_lock = with_lock\n\n def get(self, what, default):\n return self.tablename\n\n def Field(self, fieldname, type='string', length=None, default=None,\n required=False, requires=None):\n return fieldname, type\n\n def define_table(self, tablename, *fields, **args):\n if not self.tablename:\n self.tablename = MockTable(\n self, self.r_server, tablename, self.session_expiry,\n with_lock=self.with_lock, fields=fields)\n return self.tablename\n\n def __getitem__(self, key):\n return self.tablename\n\n def __call__(self, where=''):\n q = self.tablename.query\n return q\n\n def commit(self):\n # this is only called by session2trash.py\n pass\n\n def convert_dict_string(self, dict_string):\n fields = self.tablename.fields\n typed_dict = dict()\n converters = {\n 'boolean': lambda x: 1 if x.decode() == '1' else 0,\n 'blob': lambda x: x,\n }\n for field, ftype in fields:\n if field not in dict_string:\n continue\n if ftype in converters:\n typed_dict[field] = converters[ftype](dict_string[field])\n else:\n typed_dict[field] = dict_string[field].decode()\n return typed_dict\n\n\nclass MockTable(object):\n\n def __init__(self, db, r_server, tablename, session_expiry, with_lock=False, fields=None):\n # here self.db is the RedisClient instance\n self.db = db\n self.tablename = tablename\n # set the namespace for sessions of this app\n self.keyprefix = 'w2p:sess:%s' % tablename.replace('web2py_session_', '')\n # fast auto-increment id (needed for session handling)\n self.serial = \"%s:serial\" % self.keyprefix\n # index of all the session keys of this app\n self.id_idx = \"%s:id_idx\" % self.keyprefix\n # remember the session_expiry setting\n self.session_expiry = session_expiry\n self.with_lock = with_lock\n self.fields = fields if fields is not None else []\n\n def __call__(self, record_id, unique_key=None):\n # Support DAL shortcut query: table(record_id)\n\n # This will call the __getattr__ below\n # returning a MockQuery\n q = self.id\n\n # Instructs MockQuery, to behave as db(table.id == record_id)\n 
q.op = 'eq'\n q.value = record_id\n q.unique_key = unique_key\n row = q.select()\n return row[0] if row else Storage()\n\n def __getattr__(self, key):\n if key == 'id':\n # return a fake query. We need to query it just by id for normal operations\n self.query = MockQuery(\n field='id', db=self.db,\n prefix=self.keyprefix, session_expiry=self.session_expiry,\n with_lock=self.with_lock, unique_key=self.unique_key\n )\n return self.query\n elif key == '_db':\n # needed because of the calls in sessions2trash.py and globals.py\n return self.db\n\n def insert(self, **kwargs):\n # usually kwargs would be a Storage with several keys:\n # 'locked', 'client_ip','created_datetime','modified_datetime'\n # 'unique_key', 'session_data'\n # retrieve a new key\n newid = str(self.db.r_server.incr(self.serial))\n key = self.keyprefix + ':' + newid\n if self.with_lock:\n key_lock = key + ':lock'\n acquire_lock(self.db.r_server, key_lock, newid)\n with self.db.r_server.pipeline() as pipe:\n # add it to the index\n pipe.sadd(self.id_idx, key)\n # set a hash key with the Storage\n pipe.hmset(key, kwargs)\n if self.session_expiry:\n pipe.expire(key, self.session_expiry)\n pipe.execute()\n if self.with_lock:\n release_lock(self.db, key_lock, newid)\n return newid\n\n\nclass MockQuery(object):\n \"\"\"a fake Query object that supports querying by id\n and listing all keys. No other operation is supported\n \"\"\"\n def __init__(self, field=None, db=None, prefix=None, session_expiry=False,\n with_lock=False, unique_key=None):\n self.field = field\n self.value = None\n self.db = db\n self.keyprefix = prefix\n self.op = None\n self.session_expiry = session_expiry\n self.with_lock = with_lock\n self.unique_key = unique_key\n\n def __eq__(self, value, op='eq'):\n self.value = value\n self.op = op\n\n def __ge__(self, value, op='ge'):\n self.value = value\n self.op = op\n\n def __gt__(self, value, op='gt'):\n self.value = value\n self.op = op\n\n def select(self):\n if self.op == 'eq' and self.field == 'id' and self.value:\n # means that someone wants to retrieve the key self.value\n key = self.keyprefix + ':' + str(self.value)\n if self.with_lock:\n acquire_lock(self.db.r_server, key + ':lock', self.value, 2)\n rtn = {to_native(k): v for k, v in self.db.r_server.hgetall(key).items()}\n if rtn:\n if self.unique_key:\n # make sure the id and unique_key are correct\n if rtn['unique_key'] == to_bytes(self.unique_key):\n rtn['update_record'] = self.update # update record support\n else:\n rtn = None\n return [Storage(self.db.convert_dict_string(rtn))] if rtn else []\n elif self.op in ('ge', 'gt') and self.field == 'id' and self.value == 0:\n # means that someone wants the complete list\n rtn = []\n id_idx = \"%s:id_idx\" % self.keyprefix\n # find all session keys of this app\n allkeys = self.db.r_server.smembers(id_idx)\n for sess in allkeys:\n val = self.db.r_server.hgetall(sess)\n if not val:\n if self.session_expiry:\n # clean up the idx, because the key expired\n self.db.r_server.srem(id_idx, sess)\n continue\n val = Storage(self.db.convert_dict_string(val))\n # add a delete_record method (necessary for sessions2trash.py)\n val.delete_record = RecordDeleter(\n self.db, sess, self.keyprefix)\n rtn.append(val)\n return rtn\n else:\n raise Exception(\"Operation not supported\")\n\n def update(self, **kwargs):\n # means that the session has been found and needs an update\n if self.op == 'eq' and self.field == 'id' and self.value:\n key = self.keyprefix + ':' + str(self.value)\n if not self.db.r_server.exists(key):\n return 
None\n with self.db.r_server.pipeline() as pipe:\n pipe.hmset(key, kwargs)\n if self.session_expiry:\n pipe.expire(key, self.session_expiry)\n rtn = pipe.execute()[0]\n if self.with_lock:\n release_lock(self.db, key + ':lock', self.value)\n return rtn\n\n def delete(self, **kwargs):\n # means that we want this session to be deleted\n if self.op == 'eq' and self.field == 'id' and self.value:\n id_idx = \"%s:id_idx\" % self.keyprefix\n key = self.keyprefix + ':' + str(self.value)\n with self.db.r_server.pipeline() as pipe:\n pipe.delete(key)\n pipe.srem(id_idx, key)\n rtn = pipe.execute()\n return rtn[1]\n\n\nclass RecordDeleter(object):\n \"\"\"Dumb record deleter to support sessions2trash.py\"\"\"\n\n def __init__(self, db, key, keyprefix):\n self.db, self.key, self.keyprefix = db, key, keyprefix\n\n def __call__(self):\n id_idx = \"%s:id_idx\" % self.keyprefix\n # remove from the index\n self.db.r_server.srem(id_idx, self.key)\n # remove the key itself\n self.db.r_server.delete(self.key)\n", "path": "gluon/contrib/redis_session.py"}]} | 3,325 | 273 |
gh_patches_debug_25093 | rasdani/github-patches | git_diff | urllib3__urllib3-1609 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upgrade packaged rfc3986
Upgrade to v1.3.2
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/urllib3/packages/rfc3986/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2014 Rackspace
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12 # implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15
16 """
17 An implementation of semantics and validations described in RFC 3986.
18
19 See http://rfc3986.readthedocs.io/ for detailed documentation.
20
21 :copyright: (c) 2014 Rackspace
22 :license: Apache v2.0, see LICENSE for details
23 """
24
25 from .api import iri_reference
26 from .api import IRIReference
27 from .api import is_valid_uri
28 from .api import normalize_uri
29 from .api import uri_reference
30 from .api import URIReference
31 from .api import urlparse
32 from .parseresult import ParseResult
33
34 __title__ = 'rfc3986'
35 __author__ = 'Ian Stapleton Cordasco'
36 __author_email__ = '[email protected]'
37 __license__ = 'Apache v2.0'
38 __copyright__ = 'Copyright 2014 Rackspace'
39 __version__ = '1.3.1'
40
41 __all__ = (
42 'ParseResult',
43 'URIReference',
44 'IRIReference',
45 'is_valid_uri',
46 'normalize_uri',
47 'uri_reference',
48 'iri_reference',
49 'urlparse',
50 '__title__',
51 '__author__',
52 '__author_email__',
53 '__license__',
54 '__copyright__',
55 '__version__',
56 )
57
```
Path: `src/urllib3/packages/rfc3986/misc.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Copyright (c) 2014 Rackspace
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
12 # implied.
13 # See the License for the specific language governing permissions and
14 # limitations under the License.
15 """
16 Module containing compiled regular expressions and constants.
17
18 This module contains important constants, patterns, and compiled regular
19 expressions for parsing and validating URIs and their components.
20 """
21
22 import re
23
24 from . import abnf_regexp
25
26 # These are enumerated for the named tuple used as a superclass of
27 # URIReference
28 URI_COMPONENTS = ['scheme', 'authority', 'path', 'query', 'fragment']
29
30 important_characters = {
31 'generic_delimiters': abnf_regexp.GENERIC_DELIMITERS,
32 'sub_delimiters': abnf_regexp.SUB_DELIMITERS,
33 # We need to escape the '*' in this case
34 're_sub_delimiters': abnf_regexp.SUB_DELIMITERS_RE,
35 'unreserved_chars': abnf_regexp.UNRESERVED_CHARS,
36 # We need to escape the '-' in this case:
37 're_unreserved': abnf_regexp.UNRESERVED_RE,
38 }
39
40 # For details about delimiters and reserved characters, see:
41 # http://tools.ietf.org/html/rfc3986#section-2.2
42 GENERIC_DELIMITERS = abnf_regexp.GENERIC_DELIMITERS_SET
43 SUB_DELIMITERS = abnf_regexp.SUB_DELIMITERS_SET
44 RESERVED_CHARS = abnf_regexp.RESERVED_CHARS_SET
45 # For details about unreserved characters, see:
46 # http://tools.ietf.org/html/rfc3986#section-2.3
47 UNRESERVED_CHARS = abnf_regexp.UNRESERVED_CHARS_SET
48 NON_PCT_ENCODED = abnf_regexp.NON_PCT_ENCODED_SET
49
50 URI_MATCHER = re.compile(abnf_regexp.URL_PARSING_RE)
51
52 SUBAUTHORITY_MATCHER = re.compile((
53 '^(?:(?P<userinfo>{0})@)?' # userinfo
54 '(?P<host>{1})' # host
55 ':?(?P<port>{2})?$' # port
56 ).format(abnf_regexp.USERINFO_RE,
57 abnf_regexp.HOST_PATTERN,
58 abnf_regexp.PORT_RE))
59
60
61 HOST_MATCHER = re.compile('^' + abnf_regexp.HOST_RE + '$')
62 IPv4_MATCHER = re.compile('^' + abnf_regexp.IPv4_RE + '$')
63 IPv6_MATCHER = re.compile(r'^\[' + abnf_regexp.IPv6_ADDRZ_RFC4007_RE + r'\]$')
64
65 # Used by host validator
66 IPv6_NO_RFC4007_MATCHER = re.compile(r'^\[%s\]$' % (
67 abnf_regexp.IPv6_ADDRZ_RE
68 ))
69
70 # Matcher used to validate path components
71 PATH_MATCHER = re.compile(abnf_regexp.PATH_RE)
72
73
74 # ##################################
75 # Query and Fragment Matcher Section
76 # ##################################
77
78 QUERY_MATCHER = re.compile(abnf_regexp.QUERY_RE)
79
80 FRAGMENT_MATCHER = QUERY_MATCHER
81
82 # Scheme validation, see: http://tools.ietf.org/html/rfc3986#section-3.1
83 SCHEME_MATCHER = re.compile('^{0}$'.format(abnf_regexp.SCHEME_RE))
84
85 RELATIVE_REF_MATCHER = re.compile(r'^%s(\?%s)?(#%s)?$' % (
86 abnf_regexp.RELATIVE_PART_RE,
87 abnf_regexp.QUERY_RE,
88 abnf_regexp.FRAGMENT_RE,
89 ))
90
91 # See http://tools.ietf.org/html/rfc3986#section-4.3
92 ABSOLUTE_URI_MATCHER = re.compile(r'^%s:%s(\?%s)?$' % (
93 abnf_regexp.COMPONENT_PATTERN_DICT['scheme'],
94 abnf_regexp.HIER_PART_RE,
95 abnf_regexp.QUERY_RE[1:-1],
96 ))
97
98 # ###############
99 # IRIs / RFC 3987
100 # ###############
101
102 IRI_MATCHER = re.compile(abnf_regexp.URL_PARSING_RE, re.UNICODE)
103
104 ISUBAUTHORITY_MATCHER = re.compile((
105 u'^(?:(?P<userinfo>{0})@)?' # iuserinfo
106 u'(?P<host>{1})' # ihost
107 u':?(?P<port>{2})?$' # port
108 ).format(abnf_regexp.IUSERINFO_RE,
109 abnf_regexp.IHOST_RE,
110 abnf_regexp.PORT_RE), re.UNICODE)
111
112
113 IHOST_MATCHER = re.compile('^' + abnf_regexp.IHOST_RE + '$', re.UNICODE)
114
115 IPATH_MATCHER = re.compile(abnf_regexp.IPATH_RE, re.UNICODE)
116
117 IQUERY_MATCHER = re.compile(abnf_regexp.IQUERY_RE, re.UNICODE)
118
119 IFRAGMENT_MATCHER = re.compile(abnf_regexp.IFRAGMENT_RE, re.UNICODE)
120
121
122 RELATIVE_IRI_MATCHER = re.compile(u'^%s(?:\\?%s)?(?:%s)?$' % (
123 abnf_regexp.IRELATIVE_PART_RE,
124 abnf_regexp.IQUERY_RE,
125 abnf_regexp.IFRAGMENT_RE
126 ), re.UNICODE)
127
128 ABSOLUTE_IRI_MATCHER = re.compile(u'^%s:%s(?:\\?%s)?$' % (
129 abnf_regexp.COMPONENT_PATTERN_DICT['scheme'],
130 abnf_regexp.IHIER_PART_RE,
131 abnf_regexp.IQUERY_RE[1:-1]
132 ), re.UNICODE)
133
134
135 # Path merger as defined in http://tools.ietf.org/html/rfc3986#section-5.2.3
136 def merge_paths(base_uri, relative_path):
137 """Merge a base URI's path with a relative URI's path."""
138 if base_uri.path is None and base_uri.authority is not None:
139 return '/' + relative_path
140 else:
141 path = base_uri.path or ''
142 index = path.rfind('/')
143 return path[:index] + '/' + relative_path
144
145
146 UseExisting = object()
147
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/urllib3/packages/rfc3986/__init__.py b/src/urllib3/packages/rfc3986/__init__.py
--- a/src/urllib3/packages/rfc3986/__init__.py
+++ b/src/urllib3/packages/rfc3986/__init__.py
@@ -36,7 +36,7 @@
__author_email__ = '[email protected]'
__license__ = 'Apache v2.0'
__copyright__ = 'Copyright 2014 Rackspace'
-__version__ = '1.3.1'
+__version__ = '1.3.2'
__all__ = (
'ParseResult',
diff --git a/src/urllib3/packages/rfc3986/misc.py b/src/urllib3/packages/rfc3986/misc.py
--- a/src/urllib3/packages/rfc3986/misc.py
+++ b/src/urllib3/packages/rfc3986/misc.py
@@ -110,28 +110,6 @@
abnf_regexp.PORT_RE), re.UNICODE)
-IHOST_MATCHER = re.compile('^' + abnf_regexp.IHOST_RE + '$', re.UNICODE)
-
-IPATH_MATCHER = re.compile(abnf_regexp.IPATH_RE, re.UNICODE)
-
-IQUERY_MATCHER = re.compile(abnf_regexp.IQUERY_RE, re.UNICODE)
-
-IFRAGMENT_MATCHER = re.compile(abnf_regexp.IFRAGMENT_RE, re.UNICODE)
-
-
-RELATIVE_IRI_MATCHER = re.compile(u'^%s(?:\\?%s)?(?:%s)?$' % (
- abnf_regexp.IRELATIVE_PART_RE,
- abnf_regexp.IQUERY_RE,
- abnf_regexp.IFRAGMENT_RE
-), re.UNICODE)
-
-ABSOLUTE_IRI_MATCHER = re.compile(u'^%s:%s(?:\\?%s)?$' % (
- abnf_regexp.COMPONENT_PATTERN_DICT['scheme'],
- abnf_regexp.IHIER_PART_RE,
- abnf_regexp.IQUERY_RE[1:-1]
-), re.UNICODE)
-
-
# Path merger as defined in http://tools.ietf.org/html/rfc3986#section-5.2.3
def merge_paths(base_uri, relative_path):
"""Merge a base URI's path with a relative URI's path."""
| {"golden_diff": "diff --git a/src/urllib3/packages/rfc3986/__init__.py b/src/urllib3/packages/rfc3986/__init__.py\n--- a/src/urllib3/packages/rfc3986/__init__.py\n+++ b/src/urllib3/packages/rfc3986/__init__.py\n@@ -36,7 +36,7 @@\n __author_email__ = '[email protected]'\n __license__ = 'Apache v2.0'\n __copyright__ = 'Copyright 2014 Rackspace'\n-__version__ = '1.3.1'\n+__version__ = '1.3.2'\n \n __all__ = (\n 'ParseResult',\ndiff --git a/src/urllib3/packages/rfc3986/misc.py b/src/urllib3/packages/rfc3986/misc.py\n--- a/src/urllib3/packages/rfc3986/misc.py\n+++ b/src/urllib3/packages/rfc3986/misc.py\n@@ -110,28 +110,6 @@\n abnf_regexp.PORT_RE), re.UNICODE)\n \n \n-IHOST_MATCHER = re.compile('^' + abnf_regexp.IHOST_RE + '$', re.UNICODE)\n-\n-IPATH_MATCHER = re.compile(abnf_regexp.IPATH_RE, re.UNICODE)\n-\n-IQUERY_MATCHER = re.compile(abnf_regexp.IQUERY_RE, re.UNICODE)\n-\n-IFRAGMENT_MATCHER = re.compile(abnf_regexp.IFRAGMENT_RE, re.UNICODE)\n-\n-\n-RELATIVE_IRI_MATCHER = re.compile(u'^%s(?:\\\\?%s)?(?:%s)?$' % (\n- abnf_regexp.IRELATIVE_PART_RE,\n- abnf_regexp.IQUERY_RE,\n- abnf_regexp.IFRAGMENT_RE\n-), re.UNICODE)\n-\n-ABSOLUTE_IRI_MATCHER = re.compile(u'^%s:%s(?:\\\\?%s)?$' % (\n- abnf_regexp.COMPONENT_PATTERN_DICT['scheme'],\n- abnf_regexp.IHIER_PART_RE,\n- abnf_regexp.IQUERY_RE[1:-1]\n-), re.UNICODE)\n-\n-\n # Path merger as defined in http://tools.ietf.org/html/rfc3986#section-5.2.3\n def merge_paths(base_uri, relative_path):\n \"\"\"Merge a base URI's path with a relative URI's path.\"\"\"\n", "issue": "Upgrade packaged rfc3986\nUpgrade to v1.3.2\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014 Rackspace\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAn implementation of semantics and validations described in RFC 3986.\n\nSee http://rfc3986.readthedocs.io/ for detailed documentation.\n\n:copyright: (c) 2014 Rackspace\n:license: Apache v2.0, see LICENSE for details\n\"\"\"\n\nfrom .api import iri_reference\nfrom .api import IRIReference\nfrom .api import is_valid_uri\nfrom .api import normalize_uri\nfrom .api import uri_reference\nfrom .api import URIReference\nfrom .api import urlparse\nfrom .parseresult import ParseResult\n\n__title__ = 'rfc3986'\n__author__ = 'Ian Stapleton Cordasco'\n__author_email__ = '[email protected]'\n__license__ = 'Apache v2.0'\n__copyright__ = 'Copyright 2014 Rackspace'\n__version__ = '1.3.1'\n\n__all__ = (\n 'ParseResult',\n 'URIReference',\n 'IRIReference',\n 'is_valid_uri',\n 'normalize_uri',\n 'uri_reference',\n 'iri_reference',\n 'urlparse',\n '__title__',\n '__author__',\n '__author_email__',\n '__license__',\n '__copyright__',\n '__version__',\n)\n", "path": "src/urllib3/packages/rfc3986/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014 Rackspace\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless 
required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"\nModule containing compiled regular expressions and constants.\n\nThis module contains important constants, patterns, and compiled regular\nexpressions for parsing and validating URIs and their components.\n\"\"\"\n\nimport re\n\nfrom . import abnf_regexp\n\n# These are enumerated for the named tuple used as a superclass of\n# URIReference\nURI_COMPONENTS = ['scheme', 'authority', 'path', 'query', 'fragment']\n\nimportant_characters = {\n 'generic_delimiters': abnf_regexp.GENERIC_DELIMITERS,\n 'sub_delimiters': abnf_regexp.SUB_DELIMITERS,\n # We need to escape the '*' in this case\n 're_sub_delimiters': abnf_regexp.SUB_DELIMITERS_RE,\n 'unreserved_chars': abnf_regexp.UNRESERVED_CHARS,\n # We need to escape the '-' in this case:\n 're_unreserved': abnf_regexp.UNRESERVED_RE,\n}\n\n# For details about delimiters and reserved characters, see:\n# http://tools.ietf.org/html/rfc3986#section-2.2\nGENERIC_DELIMITERS = abnf_regexp.GENERIC_DELIMITERS_SET\nSUB_DELIMITERS = abnf_regexp.SUB_DELIMITERS_SET\nRESERVED_CHARS = abnf_regexp.RESERVED_CHARS_SET\n# For details about unreserved characters, see:\n# http://tools.ietf.org/html/rfc3986#section-2.3\nUNRESERVED_CHARS = abnf_regexp.UNRESERVED_CHARS_SET\nNON_PCT_ENCODED = abnf_regexp.NON_PCT_ENCODED_SET\n\nURI_MATCHER = re.compile(abnf_regexp.URL_PARSING_RE)\n\nSUBAUTHORITY_MATCHER = re.compile((\n '^(?:(?P<userinfo>{0})@)?' # userinfo\n '(?P<host>{1})' # host\n ':?(?P<port>{2})?$' # port\n ).format(abnf_regexp.USERINFO_RE,\n abnf_regexp.HOST_PATTERN,\n abnf_regexp.PORT_RE))\n\n\nHOST_MATCHER = re.compile('^' + abnf_regexp.HOST_RE + '$')\nIPv4_MATCHER = re.compile('^' + abnf_regexp.IPv4_RE + '$')\nIPv6_MATCHER = re.compile(r'^\\[' + abnf_regexp.IPv6_ADDRZ_RFC4007_RE + r'\\]$')\n\n# Used by host validator\nIPv6_NO_RFC4007_MATCHER = re.compile(r'^\\[%s\\]$' % (\n abnf_regexp.IPv6_ADDRZ_RE\n))\n\n# Matcher used to validate path components\nPATH_MATCHER = re.compile(abnf_regexp.PATH_RE)\n\n\n# ##################################\n# Query and Fragment Matcher Section\n# ##################################\n\nQUERY_MATCHER = re.compile(abnf_regexp.QUERY_RE)\n\nFRAGMENT_MATCHER = QUERY_MATCHER\n\n# Scheme validation, see: http://tools.ietf.org/html/rfc3986#section-3.1\nSCHEME_MATCHER = re.compile('^{0}$'.format(abnf_regexp.SCHEME_RE))\n\nRELATIVE_REF_MATCHER = re.compile(r'^%s(\\?%s)?(#%s)?$' % (\n abnf_regexp.RELATIVE_PART_RE,\n abnf_regexp.QUERY_RE,\n abnf_regexp.FRAGMENT_RE,\n))\n\n# See http://tools.ietf.org/html/rfc3986#section-4.3\nABSOLUTE_URI_MATCHER = re.compile(r'^%s:%s(\\?%s)?$' % (\n abnf_regexp.COMPONENT_PATTERN_DICT['scheme'],\n abnf_regexp.HIER_PART_RE,\n abnf_regexp.QUERY_RE[1:-1],\n))\n\n# ###############\n# IRIs / RFC 3987\n# ###############\n\nIRI_MATCHER = re.compile(abnf_regexp.URL_PARSING_RE, re.UNICODE)\n\nISUBAUTHORITY_MATCHER = re.compile((\n u'^(?:(?P<userinfo>{0})@)?' 
# iuserinfo\n u'(?P<host>{1})' # ihost\n u':?(?P<port>{2})?$' # port\n ).format(abnf_regexp.IUSERINFO_RE,\n abnf_regexp.IHOST_RE,\n abnf_regexp.PORT_RE), re.UNICODE)\n\n\nIHOST_MATCHER = re.compile('^' + abnf_regexp.IHOST_RE + '$', re.UNICODE)\n\nIPATH_MATCHER = re.compile(abnf_regexp.IPATH_RE, re.UNICODE)\n\nIQUERY_MATCHER = re.compile(abnf_regexp.IQUERY_RE, re.UNICODE)\n\nIFRAGMENT_MATCHER = re.compile(abnf_regexp.IFRAGMENT_RE, re.UNICODE)\n\n\nRELATIVE_IRI_MATCHER = re.compile(u'^%s(?:\\\\?%s)?(?:%s)?$' % (\n abnf_regexp.IRELATIVE_PART_RE,\n abnf_regexp.IQUERY_RE,\n abnf_regexp.IFRAGMENT_RE\n), re.UNICODE)\n\nABSOLUTE_IRI_MATCHER = re.compile(u'^%s:%s(?:\\\\?%s)?$' % (\n abnf_regexp.COMPONENT_PATTERN_DICT['scheme'],\n abnf_regexp.IHIER_PART_RE,\n abnf_regexp.IQUERY_RE[1:-1]\n), re.UNICODE)\n\n\n# Path merger as defined in http://tools.ietf.org/html/rfc3986#section-5.2.3\ndef merge_paths(base_uri, relative_path):\n \"\"\"Merge a base URI's path with a relative URI's path.\"\"\"\n if base_uri.path is None and base_uri.authority is not None:\n return '/' + relative_path\n else:\n path = base_uri.path or ''\n index = path.rfind('/')\n return path[:index] + '/' + relative_path\n\n\nUseExisting = object()\n", "path": "src/urllib3/packages/rfc3986/misc.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014 Rackspace\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nAn implementation of semantics and validations described in RFC 3986.\n\nSee http://rfc3986.readthedocs.io/ for detailed documentation.\n\n:copyright: (c) 2014 Rackspace\n:license: Apache v2.0, see LICENSE for details\n\"\"\"\n\nfrom .api import iri_reference\nfrom .api import IRIReference\nfrom .api import is_valid_uri\nfrom .api import normalize_uri\nfrom .api import uri_reference\nfrom .api import URIReference\nfrom .api import urlparse\nfrom .parseresult import ParseResult\n\n__title__ = 'rfc3986'\n__author__ = 'Ian Stapleton Cordasco'\n__author_email__ = '[email protected]'\n__license__ = 'Apache v2.0'\n__copyright__ = 'Copyright 2014 Rackspace'\n__version__ = '1.3.2'\n\n__all__ = (\n 'ParseResult',\n 'URIReference',\n 'IRIReference',\n 'is_valid_uri',\n 'normalize_uri',\n 'uri_reference',\n 'iri_reference',\n 'urlparse',\n '__title__',\n '__author__',\n '__author_email__',\n '__license__',\n '__copyright__',\n '__version__',\n)\n", "path": "src/urllib3/packages/rfc3986/__init__.py"}, {"content": "# -*- coding: utf-8 -*-\n# Copyright (c) 2014 Rackspace\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the 
License.\n\"\"\"\nModule containing compiled regular expressions and constants.\n\nThis module contains important constants, patterns, and compiled regular\nexpressions for parsing and validating URIs and their components.\n\"\"\"\n\nimport re\n\nfrom . import abnf_regexp\n\n# These are enumerated for the named tuple used as a superclass of\n# URIReference\nURI_COMPONENTS = ['scheme', 'authority', 'path', 'query', 'fragment']\n\nimportant_characters = {\n 'generic_delimiters': abnf_regexp.GENERIC_DELIMITERS,\n 'sub_delimiters': abnf_regexp.SUB_DELIMITERS,\n # We need to escape the '*' in this case\n 're_sub_delimiters': abnf_regexp.SUB_DELIMITERS_RE,\n 'unreserved_chars': abnf_regexp.UNRESERVED_CHARS,\n # We need to escape the '-' in this case:\n 're_unreserved': abnf_regexp.UNRESERVED_RE,\n}\n\n# For details about delimiters and reserved characters, see:\n# http://tools.ietf.org/html/rfc3986#section-2.2\nGENERIC_DELIMITERS = abnf_regexp.GENERIC_DELIMITERS_SET\nSUB_DELIMITERS = abnf_regexp.SUB_DELIMITERS_SET\nRESERVED_CHARS = abnf_regexp.RESERVED_CHARS_SET\n# For details about unreserved characters, see:\n# http://tools.ietf.org/html/rfc3986#section-2.3\nUNRESERVED_CHARS = abnf_regexp.UNRESERVED_CHARS_SET\nNON_PCT_ENCODED = abnf_regexp.NON_PCT_ENCODED_SET\n\nURI_MATCHER = re.compile(abnf_regexp.URL_PARSING_RE)\n\nSUBAUTHORITY_MATCHER = re.compile((\n '^(?:(?P<userinfo>{0})@)?' # userinfo\n '(?P<host>{1})' # host\n ':?(?P<port>{2})?$' # port\n ).format(abnf_regexp.USERINFO_RE,\n abnf_regexp.HOST_PATTERN,\n abnf_regexp.PORT_RE))\n\n\nHOST_MATCHER = re.compile('^' + abnf_regexp.HOST_RE + '$')\nIPv4_MATCHER = re.compile('^' + abnf_regexp.IPv4_RE + '$')\nIPv6_MATCHER = re.compile(r'^\\[' + abnf_regexp.IPv6_ADDRZ_RFC4007_RE + r'\\]$')\n\n# Used by host validator\nIPv6_NO_RFC4007_MATCHER = re.compile(r'^\\[%s\\]$' % (\n abnf_regexp.IPv6_ADDRZ_RE\n))\n\n# Matcher used to validate path components\nPATH_MATCHER = re.compile(abnf_regexp.PATH_RE)\n\n\n# ##################################\n# Query and Fragment Matcher Section\n# ##################################\n\nQUERY_MATCHER = re.compile(abnf_regexp.QUERY_RE)\n\nFRAGMENT_MATCHER = QUERY_MATCHER\n\n# Scheme validation, see: http://tools.ietf.org/html/rfc3986#section-3.1\nSCHEME_MATCHER = re.compile('^{0}$'.format(abnf_regexp.SCHEME_RE))\n\nRELATIVE_REF_MATCHER = re.compile(r'^%s(\\?%s)?(#%s)?$' % (\n abnf_regexp.RELATIVE_PART_RE,\n abnf_regexp.QUERY_RE,\n abnf_regexp.FRAGMENT_RE,\n))\n\n# See http://tools.ietf.org/html/rfc3986#section-4.3\nABSOLUTE_URI_MATCHER = re.compile(r'^%s:%s(\\?%s)?$' % (\n abnf_regexp.COMPONENT_PATTERN_DICT['scheme'],\n abnf_regexp.HIER_PART_RE,\n abnf_regexp.QUERY_RE[1:-1],\n))\n\n# ###############\n# IRIs / RFC 3987\n# ###############\n\nIRI_MATCHER = re.compile(abnf_regexp.URL_PARSING_RE, re.UNICODE)\n\nISUBAUTHORITY_MATCHER = re.compile((\n u'^(?:(?P<userinfo>{0})@)?' # iuserinfo\n u'(?P<host>{1})' # ihost\n u':?(?P<port>{2})?$' # port\n ).format(abnf_regexp.IUSERINFO_RE,\n abnf_regexp.IHOST_RE,\n abnf_regexp.PORT_RE), re.UNICODE)\n\n\n# Path merger as defined in http://tools.ietf.org/html/rfc3986#section-5.2.3\ndef merge_paths(base_uri, relative_path):\n \"\"\"Merge a base URI's path with a relative URI's path.\"\"\"\n if base_uri.path is None and base_uri.authority is not None:\n return '/' + relative_path\n else:\n path = base_uri.path or ''\n index = path.rfind('/')\n return path[:index] + '/' + relative_path\n\n\nUseExisting = object()\n", "path": "src/urllib3/packages/rfc3986/misc.py"}]} | 2,589 | 532 |
gh_patches_debug_3210 | rasdani/github-patches | git_diff | ray-project__ray-10443 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[rllib] _get_torch_exploration_action doesn't support tuple action dist
<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->
### System information
* **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Mac OS 10.15.4
* **Ray installed from (source or binary)**: binary (via pip)
* **Ray version**: 0.8.6, but nothing seems to have changed on master
* **Python version**: 3.7
### What is the problem?
When using tuple action distributions (as advised in #6372) and exploration is disabled, the line:
https://github.com/ray-project/ray/blob/a462ae2747afbeb9047e443cd51e67e3fe0b49e6/rllib/utils/exploration/stochastic_sampling.py#L75
from `_get_torch_exploration_action` raises the following exception:
```
AttributeError: 'tuple' object has no attribute 'size'
```
A simple fix that supports any type of distribution would be:
```python
logp = torch.zeros_like(action_dist.sampled_action_logp())
```
I can submit a PR if it helps.
### Reproduction (REQUIRED)
Exact command to reproduce: python `rllib_cartpole.py` for the following file
```python
import gym.envs.classic_control
from gym.spaces import Tuple, Discrete
import ray
from ray import tune
class CustomCartpole(gym.envs.classic_control.CartPoleEnv):
"""Add a dimension to the cartpole action space that is ignored."""
def __init__(self, env_config):
super().__init__()
# if override_actions is false this is just the Cartpole environment
self.override_actions = env_config['override_actions']
if self.override_actions:
# 2 is the environment's normal action space
# 4 is just a dummy number to give it an extra dimension
self.original_action_space = self.action_space
self.action_space = Tuple([Discrete(2), Discrete(4)])
self.tuple_action_space = self.action_space
def step(self, action):
# call the cartpole environment with the original action
if self.override_actions:
self.action_space = self.original_action_space
return super().step(action[0])
else:
return super().step(action)
def main():
ray.init()
tune.run(
"PPO",
stop={"episode_reward_mean": 50},
config={
"env": CustomCartpole,
"env_config": {'override_actions': True},
"num_gpus": 0,
"num_workers": 1,
"eager": False,
"evaluation_interval": 1,
"evaluation_config": {
"explore": False,
},
"framework": "torch",
},
)
if __name__ == '__main__':
main()
```
- [x] I have verified my script runs in a clean environment and reproduces the issue.
- [ ] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html).
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `rllib/utils/exploration/stochastic_sampling.py`
Content:
```
1 import tree
2 from typing import Union
3
4 from ray.rllib.models.action_dist import ActionDistribution
5 from ray.rllib.models.modelv2 import ModelV2
6 from ray.rllib.utils.annotations import override
7 from ray.rllib.utils.exploration.exploration import Exploration
8 from ray.rllib.utils.framework import try_import_tf, try_import_torch, \
9 TensorType
10
11 tf1, tf, tfv = try_import_tf()
12 torch, _ = try_import_torch()
13
14
15 class StochasticSampling(Exploration):
16 """An exploration that simply samples from a distribution.
17
18 The sampling can be made deterministic by passing explore=False into
19 the call to `get_exploration_action`.
20 Also allows for scheduled parameters for the distributions, such as
21 lowering stddev, temperature, etc.. over time.
22 """
23
24 def __init__(self, action_space, *, framework: str, model: ModelV2,
25 **kwargs):
26 """Initializes a StochasticSampling Exploration object.
27
28 Args:
29 action_space (Space): The gym action space used by the environment.
30 framework (str): One of None, "tf", "torch".
31 """
32 assert framework is not None
33 super().__init__(
34 action_space, model=model, framework=framework, **kwargs)
35
36 @override(Exploration)
37 def get_exploration_action(self,
38 *,
39 action_distribution: ActionDistribution,
40 timestep: Union[int, TensorType],
41 explore: bool = True):
42 if self.framework == "torch":
43 return self._get_torch_exploration_action(action_distribution,
44 explore)
45 else:
46 return self._get_tf_exploration_action_op(action_distribution,
47 explore)
48
49 def _get_tf_exploration_action_op(self, action_dist, explore):
50 sample = action_dist.sample()
51 deterministic_sample = action_dist.deterministic_sample()
52 action = tf.cond(
53 tf.constant(explore) if isinstance(explore, bool) else explore,
54 true_fn=lambda: sample,
55 false_fn=lambda: deterministic_sample)
56
57 def logp_false_fn():
58 batch_size = tf.shape(tree.flatten(action)[0])[0]
59 return tf.zeros(shape=(batch_size, ), dtype=tf.float32)
60
61 logp = tf.cond(
62 tf.constant(explore) if isinstance(explore, bool) else explore,
63 true_fn=lambda: action_dist.sampled_action_logp(),
64 false_fn=logp_false_fn)
65
66 return action, logp
67
68 @staticmethod
69 def _get_torch_exploration_action(action_dist, explore):
70 if explore:
71 action = action_dist.sample()
72 logp = action_dist.sampled_action_logp()
73 else:
74 action = action_dist.deterministic_sample()
75 logp = torch.zeros((action.size()[0], ), dtype=torch.float32)
76 return action, logp
77
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/rllib/utils/exploration/stochastic_sampling.py b/rllib/utils/exploration/stochastic_sampling.py
--- a/rllib/utils/exploration/stochastic_sampling.py
+++ b/rllib/utils/exploration/stochastic_sampling.py
@@ -72,5 +72,5 @@
logp = action_dist.sampled_action_logp()
else:
action = action_dist.deterministic_sample()
- logp = torch.zeros((action.size()[0], ), dtype=torch.float32)
+ logp = torch.zeros_like(action_dist.sampled_action_logp())
return action, logp
| {"golden_diff": "diff --git a/rllib/utils/exploration/stochastic_sampling.py b/rllib/utils/exploration/stochastic_sampling.py\n--- a/rllib/utils/exploration/stochastic_sampling.py\n+++ b/rllib/utils/exploration/stochastic_sampling.py\n@@ -72,5 +72,5 @@\n logp = action_dist.sampled_action_logp()\n else:\n action = action_dist.deterministic_sample()\n- logp = torch.zeros((action.size()[0], ), dtype=torch.float32)\n+ logp = torch.zeros_like(action_dist.sampled_action_logp())\n return action, logp\n", "issue": "[rllib] _get_torch_exploration_action doesn't support tuple action dist\n<!--Please include [tune], [rllib], [autoscaler] etc. in the issue title if relevant-->\r\n### System information\r\n\r\n* **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: Mac OS 10.15.4\r\n* **Ray installed from (source or binary)**: binary (via pip)\r\n* **Ray version**: 0.8.6., but nothing seems to have changed on master\r\n* **Python version**: 3.7\r\n\r\n### What is the problem?\r\n\r\nWhen using tuple action distributions (as advised in #6372) and exploration is disabled, the line:\r\n\r\nhttps://github.com/ray-project/ray/blob/a462ae2747afbeb9047e443cd51e67e3fe0b49e6/rllib/utils/exploration/stochastic_sampling.py#L75\r\n\r\nfrom `_get_torch_exploration_action` raises the following exception:\r\n\r\n```\r\nAttributeError: 'tuple' object has no attribute 'size'\r\n```\r\n\r\nA simple fix that supports any type of distribution would be:\r\n```python\r\nlogp = torch.zeros_like(action_dist.sampled_action_logp())\r\n```\r\n\r\nI can submit a PR if it helps. \r\n\r\n### Reproduction (REQUIRED)\r\n\r\nExact command to reproduce: python `rllib_cartpole.py` for the following file\r\n\r\n```python\r\nimport gym.envs.classic_control\r\nfrom gym.spaces import Tuple, Discrete\r\n\r\nimport ray\r\nfrom ray import tune\r\n\r\n\r\nclass CustomCartpole(gym.envs.classic_control.CartPoleEnv):\r\n \"\"\"Add a dimension to the cartpole action space that is ignored.\"\"\"\r\n\r\n def __init__(self, env_config):\r\n super().__init__()\r\n # if override_actions is false this is just the Cartpole environment\r\n self.override_actions = env_config['override_actions']\r\n if self.override_actions:\r\n # 2 is the environment's normal action space\r\n # 4 is just a dummy number to give it an extra dimension\r\n self.original_action_space = self.action_space\r\n self.action_space = Tuple([Discrete(2), Discrete(4)])\r\n self.tuple_action_space = self.action_space\r\n\r\n def step(self, action):\r\n # call the cartpole environment with the original action\r\n if self.override_actions:\r\n self.action_space = self.original_action_space\r\n return super().step(action[0])\r\n else:\r\n return super().step(action)\r\n\r\n\r\ndef main():\r\n ray.init()\r\n tune.run(\r\n \"PPO\",\r\n stop={\"episode_reward_mean\": 50},\r\n config={\r\n \"env\": CustomCartpole,\r\n \"env_config\": {'override_actions': True},\r\n \"num_gpus\": 0,\r\n \"num_workers\": 1,\r\n \"eager\": False,\r\n \"evaluation_interval\": 1,\r\n \"evaluation_config\": {\r\n \"explore\": False,\r\n },\r\n \"framework\": \"torch\",\r\n },\r\n )\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n```\r\n\r\n\r\n- [x] I have verified my script runs in a clean environment and reproduces the issue.\r\n- [ ] I have verified the issue also occurs with the [latest wheels](https://docs.ray.io/en/latest/installation.html).\r\n\n", "before_files": [{"content": "import tree\nfrom typing import Union\n\nfrom ray.rllib.models.action_dist import ActionDistribution\nfrom 
ray.rllib.models.modelv2 import ModelV2\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils.exploration.exploration import Exploration\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch, \\\n TensorType\n\ntf1, tf, tfv = try_import_tf()\ntorch, _ = try_import_torch()\n\n\nclass StochasticSampling(Exploration):\n \"\"\"An exploration that simply samples from a distribution.\n\n The sampling can be made deterministic by passing explore=False into\n the call to `get_exploration_action`.\n Also allows for scheduled parameters for the distributions, such as\n lowering stddev, temperature, etc.. over time.\n \"\"\"\n\n def __init__(self, action_space, *, framework: str, model: ModelV2,\n **kwargs):\n \"\"\"Initializes a StochasticSampling Exploration object.\n\n Args:\n action_space (Space): The gym action space used by the environment.\n framework (str): One of None, \"tf\", \"torch\".\n \"\"\"\n assert framework is not None\n super().__init__(\n action_space, model=model, framework=framework, **kwargs)\n\n @override(Exploration)\n def get_exploration_action(self,\n *,\n action_distribution: ActionDistribution,\n timestep: Union[int, TensorType],\n explore: bool = True):\n if self.framework == \"torch\":\n return self._get_torch_exploration_action(action_distribution,\n explore)\n else:\n return self._get_tf_exploration_action_op(action_distribution,\n explore)\n\n def _get_tf_exploration_action_op(self, action_dist, explore):\n sample = action_dist.sample()\n deterministic_sample = action_dist.deterministic_sample()\n action = tf.cond(\n tf.constant(explore) if isinstance(explore, bool) else explore,\n true_fn=lambda: sample,\n false_fn=lambda: deterministic_sample)\n\n def logp_false_fn():\n batch_size = tf.shape(tree.flatten(action)[0])[0]\n return tf.zeros(shape=(batch_size, ), dtype=tf.float32)\n\n logp = tf.cond(\n tf.constant(explore) if isinstance(explore, bool) else explore,\n true_fn=lambda: action_dist.sampled_action_logp(),\n false_fn=logp_false_fn)\n\n return action, logp\n\n @staticmethod\n def _get_torch_exploration_action(action_dist, explore):\n if explore:\n action = action_dist.sample()\n logp = action_dist.sampled_action_logp()\n else:\n action = action_dist.deterministic_sample()\n logp = torch.zeros((action.size()[0], ), dtype=torch.float32)\n return action, logp\n", "path": "rllib/utils/exploration/stochastic_sampling.py"}], "after_files": [{"content": "import tree\nfrom typing import Union\n\nfrom ray.rllib.models.action_dist import ActionDistribution\nfrom ray.rllib.models.modelv2 import ModelV2\nfrom ray.rllib.utils.annotations import override\nfrom ray.rllib.utils.exploration.exploration import Exploration\nfrom ray.rllib.utils.framework import try_import_tf, try_import_torch, \\\n TensorType\n\ntf1, tf, tfv = try_import_tf()\ntorch, _ = try_import_torch()\n\n\nclass StochasticSampling(Exploration):\n \"\"\"An exploration that simply samples from a distribution.\n\n The sampling can be made deterministic by passing explore=False into\n the call to `get_exploration_action`.\n Also allows for scheduled parameters for the distributions, such as\n lowering stddev, temperature, etc.. 
over time.\n \"\"\"\n\n def __init__(self, action_space, *, framework: str, model: ModelV2,\n **kwargs):\n \"\"\"Initializes a StochasticSampling Exploration object.\n\n Args:\n action_space (Space): The gym action space used by the environment.\n framework (str): One of None, \"tf\", \"torch\".\n \"\"\"\n assert framework is not None\n super().__init__(\n action_space, model=model, framework=framework, **kwargs)\n\n @override(Exploration)\n def get_exploration_action(self,\n *,\n action_distribution: ActionDistribution,\n timestep: Union[int, TensorType],\n explore: bool = True):\n if self.framework == \"torch\":\n return self._get_torch_exploration_action(action_distribution,\n explore)\n else:\n return self._get_tf_exploration_action_op(action_distribution,\n explore)\n\n def _get_tf_exploration_action_op(self, action_dist, explore):\n sample = action_dist.sample()\n deterministic_sample = action_dist.deterministic_sample()\n action = tf.cond(\n tf.constant(explore) if isinstance(explore, bool) else explore,\n true_fn=lambda: sample,\n false_fn=lambda: deterministic_sample)\n\n def logp_false_fn():\n batch_size = tf.shape(tree.flatten(action)[0])[0]\n return tf.zeros(shape=(batch_size, ), dtype=tf.float32)\n\n logp = tf.cond(\n tf.constant(explore) if isinstance(explore, bool) else explore,\n true_fn=lambda: action_dist.sampled_action_logp(),\n false_fn=logp_false_fn)\n\n return action, logp\n\n @staticmethod\n def _get_torch_exploration_action(action_dist, explore):\n if explore:\n action = action_dist.sample()\n logp = action_dist.sampled_action_logp()\n else:\n action = action_dist.deterministic_sample()\n logp = torch.zeros_like(action_dist.sampled_action_logp())\n return action, logp\n", "path": "rllib/utils/exploration/stochastic_sampling.py"}]} | 1,728 | 132 |
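The key point behind the fix in the record above is that `deterministic_sample()` on a tuple action distribution returns a nested structure (a tuple of tensors), which has no `.size()`, while `sampled_action_logp()` is always a single per-batch tensor whose shape can serve as a template for the zero log-probs. The sketch below illustrates that distinction with plain `torch.distributions` objects standing in for RLlib's tuple action distribution; the batch size and sub-spaces are arbitrary stand-ins, not part of the original record.

```python
import torch
from torch.distributions import Categorical

batch = 4
# Stand-ins for the two components of a Tuple(Discrete(2), Discrete(4)) action space.
d1 = Categorical(logits=torch.zeros(batch, 2))
d2 = Categorical(logits=torch.zeros(batch, 4))

# A tuple action distribution hands back a tuple of tensors, not a single tensor,
# so calling .size() on it raises AttributeError, as reported in the issue.
greedy_action = (d1.probs.argmax(dim=-1), d2.probs.argmax(dim=-1))

# The summed sampled-action log-prob is always one (batch,) tensor, so
# torch.zeros_like() on it yields correctly shaped zero log-probs for the
# deterministic (non-exploring) branch.
sample = (d1.sample(), d2.sample())
sampled_logp = d1.log_prob(sample[0]) + d2.log_prob(sample[1])
zero_logp = torch.zeros_like(sampled_logp)
assert zero_logp.shape == (batch,)
```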
gh_patches_debug_1209 | rasdani/github-patches | git_diff | scrapy__scrapy-4503 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fix the hoverxref configuration
> You shouldn't override hoverxref_version and hoverxref_project since they are taken automatically from Read the Docs.
>
> If you want to avoid your CI failing because of this, you can define the environment variables as Read the Docs does:
>
> READTHEDOCS_PROJECT=scrapy
> READTHEDOCS_VERSION=''
>
> With the current configuration, all the versions built on Read the Docs will point to a different version on Read the Docs and this will conflict. For example, current master version in Read the Docs defines hoverxref_version='2.0.0' but that version does not exist on Read the Docs and the tooltip does not know where to get the content from.
@humitos at https://github.com/scrapy/scrapy/pull/4480#discussion_r409026912
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Scrapy documentation build configuration file, created by
4 # sphinx-quickstart on Mon Nov 24 12:02:52 2008.
5 #
6 # This file is execfile()d with the current directory set to its containing dir.
7 #
8 # The contents of this file are pickled, so don't put values in the namespace
9 # that aren't pickleable (module imports are okay, they're removed automatically).
10 #
11 # All configuration values have a default; values that are commented out
12 # serve to show the default.
13
14 import sys
15 from datetime import datetime
16 from os import path
17
18 # If your extensions are in another directory, add it here. If the directory
19 # is relative to the documentation root, use os.path.abspath to make it
20 # absolute, like shown here.
21 sys.path.append(path.join(path.dirname(__file__), "_ext"))
22 sys.path.insert(0, path.dirname(path.dirname(__file__)))
23
24
25 # General configuration
26 # ---------------------
27
28 # Add any Sphinx extension module names here, as strings. They can be extensions
29 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
30 extensions = [
31 'hoverxref.extension',
32 'notfound.extension',
33 'scrapydocs',
34 'sphinx.ext.autodoc',
35 'sphinx.ext.coverage',
36 'sphinx.ext.intersphinx',
37 'sphinx.ext.viewcode',
38 ]
39
40 # Add any paths that contain templates here, relative to this directory.
41 templates_path = ['_templates']
42
43 # The suffix of source filenames.
44 source_suffix = '.rst'
45
46 # The encoding of source files.
47 #source_encoding = 'utf-8'
48
49 # The master toctree document.
50 master_doc = 'index'
51
52 # General information about the project.
53 project = 'Scrapy'
54 copyright = '2008–{}, Scrapy developers'.format(datetime.now().year)
55
56 # The version info for the project you're documenting, acts as replacement for
57 # |version| and |release|, also used in various other places throughout the
58 # built documents.
59 #
60 # The short X.Y version.
61 try:
62 import scrapy
63 version = '.'.join(map(str, scrapy.version_info[:2]))
64 release = scrapy.__version__
65 except ImportError:
66 version = ''
67 release = ''
68
69 # The language for content autogenerated by Sphinx. Refer to documentation
70 # for a list of supported languages.
71 language = 'en'
72
73 # There are two options for replacing |today|: either, you set today to some
74 # non-false value, then it is used:
75 #today = ''
76 # Else, today_fmt is used as the format for a strftime call.
77 #today_fmt = '%B %d, %Y'
78
79 # List of documents that shouldn't be included in the build.
80 #unused_docs = []
81
82 exclude_patterns = ['build']
83
84 # List of directories, relative to source directory, that shouldn't be searched
85 # for source files.
86 exclude_trees = ['.build']
87
88 # The reST default role (used for this markup: `text`) to use for all documents.
89 #default_role = None
90
91 # If true, '()' will be appended to :func: etc. cross-reference text.
92 #add_function_parentheses = True
93
94 # If true, the current module name will be prepended to all description
95 # unit titles (such as .. function::).
96 #add_module_names = True
97
98 # If true, sectionauthor and moduleauthor directives will be shown in the
99 # output. They are ignored by default.
100 #show_authors = False
101
102 # The name of the Pygments (syntax highlighting) style to use.
103 pygments_style = 'sphinx'
104
105
106 # Options for HTML output
107 # -----------------------
108
109 # The theme to use for HTML and HTML Help pages. See the documentation for
110 # a list of builtin themes.
111 html_theme = 'sphinx_rtd_theme'
112
113 # Theme options are theme-specific and customize the look and feel of a theme
114 # further. For a list of options available for each theme, see the
115 # documentation.
116 #html_theme_options = {}
117
118 # Add any paths that contain custom themes here, relative to this directory.
119 # Add path to the RTD explicitly to robustify builds (otherwise might
120 # fail in a clean Debian build env)
121 import sphinx_rtd_theme
122 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
123
124
125 # The style sheet to use for HTML and HTML Help pages. A file of that name
126 # must exist either in Sphinx' static/ path, or in one of the custom paths
127 # given in html_static_path.
128 # html_style = 'scrapydoc.css'
129
130 # The name for this set of Sphinx documents. If None, it defaults to
131 # "<project> v<release> documentation".
132 #html_title = None
133
134 # A shorter title for the navigation bar. Default is the same as html_title.
135 #html_short_title = None
136
137 # The name of an image file (relative to this directory) to place at the top
138 # of the sidebar.
139 #html_logo = None
140
141 # The name of an image file (within the static path) to use as favicon of the
142 # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
143 # pixels large.
144 #html_favicon = None
145
146 # Add any paths that contain custom static files (such as style sheets) here,
147 # relative to this directory. They are copied after the builtin static files,
148 # so a file named "default.css" will overwrite the builtin "default.css".
149 html_static_path = ['_static']
150
151 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
152 # using the given strftime format.
153 html_last_updated_fmt = '%b %d, %Y'
154
155 # Custom sidebar templates, maps document names to template names.
156 #html_sidebars = {}
157
158 # Additional templates that should be rendered to pages, maps page names to
159 # template names.
160 #html_additional_pages = {}
161
162 # If false, no module index is generated.
163 #html_use_modindex = True
164
165 # If false, no index is generated.
166 #html_use_index = True
167
168 # If true, the index is split into individual pages for each letter.
169 #html_split_index = False
170
171 # If true, the reST sources are included in the HTML build as _sources/<name>.
172 html_copy_source = True
173
174 # If true, an OpenSearch description file will be output, and all pages will
175 # contain a <link> tag referring to it. The value of this option must be the
176 # base URL from which the finished HTML is served.
177 #html_use_opensearch = ''
178
179 # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
180 #html_file_suffix = ''
181
182 # Output file base name for HTML help builder.
183 htmlhelp_basename = 'Scrapydoc'
184
185
186 # Options for LaTeX output
187 # ------------------------
188
189 # The paper size ('letter' or 'a4').
190 #latex_paper_size = 'letter'
191
192 # The font size ('10pt', '11pt' or '12pt').
193 #latex_font_size = '10pt'
194
195 # Grouping the document tree into LaTeX files. List of tuples
196 # (source start file, target name, title, author, document class [howto/manual]).
197 latex_documents = [
198 ('index', 'Scrapy.tex', 'Scrapy Documentation',
199 'Scrapy developers', 'manual'),
200 ]
201
202 # The name of an image file (relative to this directory) to place at the top of
203 # the title page.
204 #latex_logo = None
205
206 # For "manual" documents, if this is true, then toplevel headings are parts,
207 # not chapters.
208 #latex_use_parts = False
209
210 # Additional stuff for the LaTeX preamble.
211 #latex_preamble = ''
212
213 # Documents to append as an appendix to all manuals.
214 #latex_appendices = []
215
216 # If false, no module index is generated.
217 #latex_use_modindex = True
218
219
220 # Options for the linkcheck builder
221 # ---------------------------------
222
223 # A list of regular expressions that match URIs that should not be checked when
224 # doing a linkcheck build.
225 linkcheck_ignore = [
226 'http://localhost:\d+', 'http://hg.scrapy.org',
227 'http://directory.google.com/'
228 ]
229
230
231 # Options for the Coverage extension
232 # ----------------------------------
233 coverage_ignore_pyobjects = [
234 # Contract’s add_pre_hook and add_post_hook are not documented because
235 # they should be transparent to contract developers, for whom pre_hook and
236 # post_hook should be the actual concern.
237 r'\bContract\.add_(pre|post)_hook$',
238
239 # ContractsManager is an internal class, developers are not expected to
240 # interact with it directly in any way.
241 r'\bContractsManager\b$',
242
243 # For default contracts we only want to document their general purpose in
244 # their __init__ method, the methods they reimplement to achieve that purpose
245 # should be irrelevant to developers using those contracts.
246 r'\w+Contract\.(adjust_request_args|(pre|post)_process)$',
247
248 # Methods of downloader middlewares are not documented, only the classes
249 # themselves, since downloader middlewares are controlled through Scrapy
250 # settings.
251 r'^scrapy\.downloadermiddlewares\.\w*?\.(\w*?Middleware|DownloaderStats)\.',
252
253 # Base classes of downloader middlewares are implementation details that
254 # are not meant for users.
255 r'^scrapy\.downloadermiddlewares\.\w*?\.Base\w*?Middleware',
256
257 # Private exception used by the command-line interface implementation.
258 r'^scrapy\.exceptions\.UsageError',
259
260 # Methods of BaseItemExporter subclasses are only documented in
261 # BaseItemExporter.
262 r'^scrapy\.exporters\.(?!BaseItemExporter\b)\w*?\.',
263
264 # Extension behavior is only modified through settings. Methods of
265 # extension classes, as well as helper functions, are implementation
266 # details that are not documented.
267 r'^scrapy\.extensions\.[a-z]\w*?\.[A-Z]\w*?\.', # methods
268 r'^scrapy\.extensions\.[a-z]\w*?\.[a-z]', # helper functions
269
270 # Never documented before, and deprecated now.
271 r'^scrapy\.item\.DictItem$',
272 r'^scrapy\.linkextractors\.FilteringLinkExtractor$',
273
274 # Implementation detail of LxmlLinkExtractor
275 r'^scrapy\.linkextractors\.lxmlhtml\.LxmlParserLinkExtractor',
276 ]
277
278
279 # Options for the InterSphinx extension
280 # -------------------------------------
281
282 intersphinx_mapping = {
283 'coverage': ('https://coverage.readthedocs.io/en/stable', None),
284 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),
285 'pytest': ('https://docs.pytest.org/en/latest', None),
286 'python': ('https://docs.python.org/3', None),
287 'sphinx': ('https://www.sphinx-doc.org/en/master', None),
288 'tox': ('https://tox.readthedocs.io/en/latest', None),
289 'twisted': ('https://twistedmatrix.com/documents/current', None),
290 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),
291 }
292
293
294 # Options for sphinx-hoverxref options
295 # ------------------------------------
296
297 hoverxref_auto_ref = True
298 hoverxref_project = "scrapy"
299 hoverxref_version = release
300 hoverxref_role_types = {
301 "class": "tooltip",
302 "confval": "tooltip",
303 "hoverxref": "tooltip",
304 "mod": "tooltip",
305 "ref": "tooltip",
306 }
307
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -295,8 +295,6 @@
# ------------------------------------
hoverxref_auto_ref = True
-hoverxref_project = "scrapy"
-hoverxref_version = release
hoverxref_role_types = {
"class": "tooltip",
"confval": "tooltip",
| {"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -295,8 +295,6 @@\n # ------------------------------------\n \n hoverxref_auto_ref = True\n-hoverxref_project = \"scrapy\"\n-hoverxref_version = release\n hoverxref_role_types = {\n \"class\": \"tooltip\",\n \"confval\": \"tooltip\",\n", "issue": "Fix the hoverxref configuration\n> You shouldn't override hoverxref_version and hoverxref_project since they are taken automatically from Read the Docs.\r\n>\r\n> If you want to avoid your CI failing because of this, you can define the environment variables as Read the Docs does:\r\n> \r\n> READTHEDOCS_PROJECT=scrapy\r\n> READTHEDOCS_VERSION=''\r\n> \r\n> With the current configuration, all the versions built on Read the Docs will point to a different version on Read the Docs and this will conflict. For example, current master version in Read the Docs defines hoverxref_version='2.0.0' but that version does not exist on Read the Docs and the tooltip does not known where to get the content from.\r\n\r\n@humitos at https://github.com/scrapy/scrapy/pull/4480#discussion_r409026912\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom datetime import datetime\nfrom os import path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.append(path.join(path.dirname(__file__), \"_ext\"))\nsys.path.insert(0, path.dirname(path.dirname(__file__)))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'hoverxref.extension',\n 'notfound.extension',\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Scrapy'\ncopyright = '2008\u2013{}, Scrapy developers'.format(datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n version = '.'.join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = ''\n release = ''\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\nexclude_patterns = ['build']\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['.build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scrapydoc'\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'Scrapy.tex', 'Scrapy Documentation',\n 'Scrapy developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n 'http://localhost:\\d+', 'http://hg.scrapy.org',\n 'http://directory.google.com/'\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract\u2019s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r'\\bContract\\.add_(pre|post)_hook$',\n\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r'\\bContractsManager\\b$',\n\n # For default contracts we only want to document their general purpose in\n # their __init__ method, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r'\\w+Contract\\.(adjust_request_args|(pre|post)_process)$',\n\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n 
r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.',\n\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware',\n\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n\n # Methods of BaseItemExporter subclasses are only documented in\n # BaseItemExporter.\n r'^scrapy\\.exporters\\.(?!BaseItemExporter\\b)\\w*?\\.',\n\n # Extension behavior is only modified through settings. Methods of\n # extension classes, as well as helper functions, are implementation\n # details that are not documented.\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[A-Z]\\w*?\\.', # methods\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[a-z]', # helper functions\n\n # Never documented before, and deprecated now.\n r'^scrapy\\.item\\.DictItem$',\n r'^scrapy\\.linkextractors\\.FilteringLinkExtractor$',\n\n # Implementation detail of LxmlLinkExtractor\n r'^scrapy\\.linkextractors\\.lxmlhtml\\.LxmlParserLinkExtractor',\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n 'coverage': ('https://coverage.readthedocs.io/en/stable', None),\n 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),\n 'pytest': ('https://docs.pytest.org/en/latest', None),\n 'python': ('https://docs.python.org/3', None),\n 'sphinx': ('https://www.sphinx-doc.org/en/master', None),\n 'tox': ('https://tox.readthedocs.io/en/latest', None),\n 'twisted': ('https://twistedmatrix.com/documents/current', None),\n 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),\n}\n\n\n# Options for sphinx-hoverxref options\n# ------------------------------------\n\nhoverxref_auto_ref = True\nhoverxref_project = \"scrapy\"\nhoverxref_version = release\nhoverxref_role_types = {\n \"class\": \"tooltip\",\n \"confval\": \"tooltip\",\n \"hoverxref\": \"tooltip\",\n \"mod\": \"tooltip\",\n \"ref\": \"tooltip\",\n}\n", "path": "docs/conf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Scrapy documentation build configuration file, created by\n# sphinx-quickstart on Mon Nov 24 12:02:52 2008.\n#\n# This file is execfile()d with the current directory set to its containing dir.\n#\n# The contents of this file are pickled, so don't put values in the namespace\n# that aren't pickleable (module imports are okay, they're removed automatically).\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\nimport sys\nfrom datetime import datetime\nfrom os import path\n\n# If your extensions are in another directory, add it here. If the directory\n# is relative to the documentation root, use os.path.abspath to make it\n# absolute, like shown here.\nsys.path.append(path.join(path.dirname(__file__), \"_ext\"))\nsys.path.insert(0, path.dirname(path.dirname(__file__)))\n\n\n# General configuration\n# ---------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be extensions\n# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.\nextensions = [\n 'hoverxref.extension',\n 'notfound.extension',\n 'scrapydocs',\n 'sphinx.ext.autodoc',\n 'sphinx.ext.coverage',\n 'sphinx.ext.intersphinx',\n 'sphinx.ext.viewcode',\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix of source filenames.\nsource_suffix = '.rst'\n\n# The encoding of source files.\n#source_encoding = 'utf-8'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Scrapy'\ncopyright = '2008\u2013{}, Scrapy developers'.format(datetime.now().year)\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\ntry:\n import scrapy\n version = '.'.join(map(str, scrapy.version_info[:2]))\n release = scrapy.__version__\nexcept ImportError:\n version = ''\n release = ''\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\nlanguage = 'en'\n\n# There are two options for replacing |today|: either, you set today to some\n# non-false value, then it is used:\n#today = ''\n# Else, today_fmt is used as the format for a strftime call.\n#today_fmt = '%B %d, %Y'\n\n# List of documents that shouldn't be included in the build.\n#unused_docs = []\n\nexclude_patterns = ['build']\n\n# List of directories, relative to source directory, that shouldn't be searched\n# for source files.\nexclude_trees = ['.build']\n\n# The reST default role (used for this markup: `text`) to use for all documents.\n#default_role = None\n\n# If true, '()' will be appended to :func: etc. cross-reference text.\n#add_function_parentheses = True\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\n#add_module_names = True\n\n# If true, sectionauthor and moduleauthor directives will be shown in the\n# output. They are ignored by default.\n#show_authors = False\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n\n# Options for HTML output\n# -----------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\nhtml_theme = 'sphinx_rtd_theme'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#html_theme_options = {}\n\n# Add any paths that contain custom themes here, relative to this directory.\n# Add path to the RTD explicitly to robustify builds (otherwise might\n# fail in a clean Debian build env)\nimport sphinx_rtd_theme\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n\n# The style sheet to use for HTML and HTML Help pages. A file of that name\n# must exist either in Sphinx' static/ path, or in one of the custom paths\n# given in html_static_path.\n# html_style = 'scrapydoc.css'\n\n# The name for this set of Sphinx documents. If None, it defaults to\n# \"<project> v<release> documentation\".\n#html_title = None\n\n# A shorter title for the navigation bar. 
Default is the same as html_title.\n#html_short_title = None\n\n# The name of an image file (relative to this directory) to place at the top\n# of the sidebar.\n#html_logo = None\n\n# The name of an image file (within the static path) to use as favicon of the\n# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32\n# pixels large.\n#html_favicon = None\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,\n# using the given strftime format.\nhtml_last_updated_fmt = '%b %d, %Y'\n\n# Custom sidebar templates, maps document names to template names.\n#html_sidebars = {}\n\n# Additional templates that should be rendered to pages, maps page names to\n# template names.\n#html_additional_pages = {}\n\n# If false, no module index is generated.\n#html_use_modindex = True\n\n# If false, no index is generated.\n#html_use_index = True\n\n# If true, the index is split into individual pages for each letter.\n#html_split_index = False\n\n# If true, the reST sources are included in the HTML build as _sources/<name>.\nhtml_copy_source = True\n\n# If true, an OpenSearch description file will be output, and all pages will\n# contain a <link> tag referring to it. The value of this option must be the\n# base URL from which the finished HTML is served.\n#html_use_opensearch = ''\n\n# If nonempty, this is the file name suffix for HTML files (e.g. \".xhtml\").\n#html_file_suffix = ''\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Scrapydoc'\n\n\n# Options for LaTeX output\n# ------------------------\n\n# The paper size ('letter' or 'a4').\n#latex_paper_size = 'letter'\n\n# The font size ('10pt', '11pt' or '12pt').\n#latex_font_size = '10pt'\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title, author, document class [howto/manual]).\nlatex_documents = [\n ('index', 'Scrapy.tex', 'Scrapy Documentation',\n 'Scrapy developers', 'manual'),\n]\n\n# The name of an image file (relative to this directory) to place at the top of\n# the title page.\n#latex_logo = None\n\n# For \"manual\" documents, if this is true, then toplevel headings are parts,\n# not chapters.\n#latex_use_parts = False\n\n# Additional stuff for the LaTeX preamble.\n#latex_preamble = ''\n\n# Documents to append as an appendix to all manuals.\n#latex_appendices = []\n\n# If false, no module index is generated.\n#latex_use_modindex = True\n\n\n# Options for the linkcheck builder\n# ---------------------------------\n\n# A list of regular expressions that match URIs that should not be checked when\n# doing a linkcheck build.\nlinkcheck_ignore = [\n 'http://localhost:\\d+', 'http://hg.scrapy.org',\n 'http://directory.google.com/'\n]\n\n\n# Options for the Coverage extension\n# ----------------------------------\ncoverage_ignore_pyobjects = [\n # Contract\u2019s add_pre_hook and add_post_hook are not documented because\n # they should be transparent to contract developers, for whom pre_hook and\n # post_hook should be the actual concern.\n r'\\bContract\\.add_(pre|post)_hook$',\n\n # ContractsManager is an internal class, developers are not expected to\n # interact with it directly in any way.\n r'\\bContractsManager\\b$',\n\n # For default contracts we only want to document their general purpose in\n # their __init__ method, the methods they reimplement to achieve that purpose\n # should be irrelevant to developers using those contracts.\n r'\\w+Contract\\.(adjust_request_args|(pre|post)_process)$',\n\n # Methods of downloader middlewares are not documented, only the classes\n # themselves, since downloader middlewares are controlled through Scrapy\n # settings.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.(\\w*?Middleware|DownloaderStats)\\.',\n\n # Base classes of downloader middlewares are implementation details that\n # are not meant for users.\n r'^scrapy\\.downloadermiddlewares\\.\\w*?\\.Base\\w*?Middleware',\n\n # Private exception used by the command-line interface implementation.\n r'^scrapy\\.exceptions\\.UsageError',\n\n # Methods of BaseItemExporter subclasses are only documented in\n # BaseItemExporter.\n r'^scrapy\\.exporters\\.(?!BaseItemExporter\\b)\\w*?\\.',\n\n # Extension behavior is only modified through settings. 
Methods of\n # extension classes, as well as helper functions, are implementation\n # details that are not documented.\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[A-Z]\\w*?\\.', # methods\n r'^scrapy\\.extensions\\.[a-z]\\w*?\\.[a-z]', # helper functions\n\n # Never documented before, and deprecated now.\n r'^scrapy\\.item\\.DictItem$',\n r'^scrapy\\.linkextractors\\.FilteringLinkExtractor$',\n\n # Implementation detail of LxmlLinkExtractor\n r'^scrapy\\.linkextractors\\.lxmlhtml\\.LxmlParserLinkExtractor',\n]\n\n\n# Options for the InterSphinx extension\n# -------------------------------------\n\nintersphinx_mapping = {\n 'coverage': ('https://coverage.readthedocs.io/en/stable', None),\n 'cssselect': ('https://cssselect.readthedocs.io/en/latest', None),\n 'pytest': ('https://docs.pytest.org/en/latest', None),\n 'python': ('https://docs.python.org/3', None),\n 'sphinx': ('https://www.sphinx-doc.org/en/master', None),\n 'tox': ('https://tox.readthedocs.io/en/latest', None),\n 'twisted': ('https://twistedmatrix.com/documents/current', None),\n 'twistedapi': ('https://twistedmatrix.com/documents/current/api', None),\n}\n\n\n# Options for sphinx-hoverxref options\n# ------------------------------------\n\nhoverxref_auto_ref = True\nhoverxref_role_types = {\n \"class\": \"tooltip\",\n \"confval\": \"tooltip\",\n \"hoverxref\": \"tooltip\",\n \"mod\": \"tooltip\",\n \"ref\": \"tooltip\",\n}\n", "path": "docs/conf.py"}]} | 3,798 | 88 |
gh_patches_debug_35689 | rasdani/github-patches | git_diff | microsoft__torchgeo-1898 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add `ignore_index` support for Jaccard Loss
### Summary
Currently, the `SemanticSegmentationTask` recognises the `ignore_index` parameter only when cross entropy or focal loss is used. However, `smp.losses.JaccardLoss` implicitly supports this option via its `classes` parameter, which is currently set to `self.hparams["num_classes"]`. The feature request is to adapt the argument passed to this parameter so that `ignore_index` works with all currently supported losses.
### Rationale
The Jaccard index is a common semantic segmentation metric which also makes for a decent loss function. Because of the way it is defined, it is important to ignore overly dominant classes (e.g., the background when classifying building rooftops); otherwise performance can be hindered significantly.
### Implementation
Change the `classes` argument of `smp.losses.JaccardLoss` in `SemanticSegmentationTask.configure_losses` from `self.hparams["num_classes"]` to `list(set(list(range(self.hparams["num_classes"]))).difference(set([ignore_index])))`, assuming that `ignore_index` is not `None`.
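For illustration, a minimal sketch of that idea (the `jaccard_classes` helper below is made up for this example; a plain list comprehension is equivalent to the set difference written above):
```python
# Sketch: build the class list that JaccardLoss should score, skipping ignore_index.
from typing import List, Optional

import segmentation_models_pytorch as smp


def jaccard_classes(num_classes: int, ignore_index: Optional[int]) -> List[int]:
    # Keep every class id except the ignored one (if any was given).
    return [i for i in range(num_classes) if i != ignore_index]


# e.g. num_classes=4, ignore_index=0 -> [1, 2, 3]
criterion = smp.losses.JaccardLoss(mode="multiclass", classes=jaccard_classes(4, 0))
```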
### Alternatives
_No response_
### Additional information
_No response_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchgeo/trainers/segmentation.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 """Trainers for semantic segmentation."""
5
6 import os
7 import warnings
8 from typing import Any, Optional, Union
9
10 import matplotlib.pyplot as plt
11 import segmentation_models_pytorch as smp
12 import torch.nn as nn
13 from matplotlib.figure import Figure
14 from torch import Tensor
15 from torchmetrics import MetricCollection
16 from torchmetrics.classification import MulticlassAccuracy, MulticlassJaccardIndex
17 from torchvision.models._api import WeightsEnum
18
19 from ..datasets import RGBBandsMissingError, unbind_samples
20 from ..models import FCN, get_weight
21 from . import utils
22 from .base import BaseTask
23
24
25 class SemanticSegmentationTask(BaseTask):
26 """Semantic Segmentation."""
27
28 def __init__(
29 self,
30 model: str = "unet",
31 backbone: str = "resnet50",
32 weights: Optional[Union[WeightsEnum, str, bool]] = None,
33 in_channels: int = 3,
34 num_classes: int = 1000,
35 num_filters: int = 3,
36 loss: str = "ce",
37 class_weights: Optional[Tensor] = None,
38 ignore_index: Optional[int] = None,
39 lr: float = 1e-3,
40 patience: int = 10,
41 freeze_backbone: bool = False,
42 freeze_decoder: bool = False,
43 ) -> None:
44 """Inititalize a new SemanticSegmentationTask instance.
45
46 Args:
47 model: Name of the
48 `smp <https://smp.readthedocs.io/en/latest/models.html>`__ model to use.
49 backbone: Name of the `timm
50 <https://smp.readthedocs.io/en/latest/encoders_timm.html>`__ or `smp
51 <https://smp.readthedocs.io/en/latest/encoders.html>`__ backbone to use.
52 weights: Initial model weights. Either a weight enum, the string
53 representation of a weight enum, True for ImageNet weights, False or
54 None for random weights, or the path to a saved model state dict. FCN
55 model does not support pretrained weights. Pretrained ViT weight enums
56 are not supported yet.
57 in_channels: Number of input channels to model.
58 num_classes: Number of prediction classes.
59 num_filters: Number of filters. Only applicable when model='fcn'.
60 loss: Name of the loss function, currently supports
61 'ce', 'jaccard' or 'focal' loss.
62 class_weights: Optional rescaling weight given to each
63 class and used with 'ce' loss.
64 ignore_index: Optional integer class index to ignore in the loss and
65 metrics.
66 lr: Learning rate for optimizer.
67 patience: Patience for learning rate scheduler.
68 freeze_backbone: Freeze the backbone network to fine-tune the
69 decoder and segmentation head.
70 freeze_decoder: Freeze the decoder network to linear probe
71 the segmentation head.
72
73 Warns:
74 UserWarning: When loss='jaccard' and ignore_index is specified.
75
76 .. versionchanged:: 0.3
77 *ignore_zeros* was renamed to *ignore_index*.
78
79 .. versionchanged:: 0.4
80 *segmentation_model*, *encoder_name*, and *encoder_weights*
81 were renamed to *model*, *backbone*, and *weights*.
82
83 .. versionadded: 0.5
84 The *class_weights*, *freeze_backbone*, and *freeze_decoder* parameters.
85
86 .. versionchanged:: 0.5
87 The *weights* parameter now supports WeightEnums and checkpoint paths.
88 *learning_rate* and *learning_rate_schedule_patience* were renamed to
89 *lr* and *patience*.
90 """
91 if ignore_index is not None and loss == "jaccard":
92 warnings.warn(
93 "ignore_index has no effect on training when loss='jaccard'",
94 UserWarning,
95 )
96
97 self.weights = weights
98 super().__init__(ignore="weights")
99
100 def configure_losses(self) -> None:
101 """Initialize the loss criterion.
102
103 Raises:
104 ValueError: If *loss* is invalid.
105 """
106 loss: str = self.hparams["loss"]
107 ignore_index = self.hparams["ignore_index"]
108 if loss == "ce":
109 ignore_value = -1000 if ignore_index is None else ignore_index
110 self.criterion = nn.CrossEntropyLoss(
111 ignore_index=ignore_value, weight=self.hparams["class_weights"]
112 )
113 elif loss == "jaccard":
114 self.criterion = smp.losses.JaccardLoss(
115 mode="multiclass", classes=self.hparams["num_classes"]
116 )
117 elif loss == "focal":
118 self.criterion = smp.losses.FocalLoss(
119 "multiclass", ignore_index=ignore_index, normalized=True
120 )
121 else:
122 raise ValueError(
123 f"Loss type '{loss}' is not valid. "
124 "Currently, supports 'ce', 'jaccard' or 'focal' loss."
125 )
126
127 def configure_metrics(self) -> None:
128 """Initialize the performance metrics."""
129 num_classes: int = self.hparams["num_classes"]
130 ignore_index: Optional[int] = self.hparams["ignore_index"]
131 metrics = MetricCollection(
132 [
133 MulticlassAccuracy(
134 num_classes=num_classes,
135 ignore_index=ignore_index,
136 multidim_average="global",
137 average="micro",
138 ),
139 MulticlassJaccardIndex(
140 num_classes=num_classes, ignore_index=ignore_index, average="micro"
141 ),
142 ]
143 )
144 self.train_metrics = metrics.clone(prefix="train_")
145 self.val_metrics = metrics.clone(prefix="val_")
146 self.test_metrics = metrics.clone(prefix="test_")
147
148 def configure_models(self) -> None:
149 """Initialize the model.
150
151 Raises:
152 ValueError: If *model* is invalid.
153 """
154 model: str = self.hparams["model"]
155 backbone: str = self.hparams["backbone"]
156 weights = self.weights
157 in_channels: int = self.hparams["in_channels"]
158 num_classes: int = self.hparams["num_classes"]
159 num_filters: int = self.hparams["num_filters"]
160
161 if model == "unet":
162 self.model = smp.Unet(
163 encoder_name=backbone,
164 encoder_weights="imagenet" if weights is True else None,
165 in_channels=in_channels,
166 classes=num_classes,
167 )
168 elif model == "deeplabv3+":
169 self.model = smp.DeepLabV3Plus(
170 encoder_name=backbone,
171 encoder_weights="imagenet" if weights is True else None,
172 in_channels=in_channels,
173 classes=num_classes,
174 )
175 elif model == "fcn":
176 self.model = FCN(
177 in_channels=in_channels, classes=num_classes, num_filters=num_filters
178 )
179 else:
180 raise ValueError(
181 f"Model type '{model}' is not valid. "
182 "Currently, only supports 'unet', 'deeplabv3+' and 'fcn'."
183 )
184
185 if model != "fcn":
186 if weights and weights is not True:
187 if isinstance(weights, WeightsEnum):
188 state_dict = weights.get_state_dict(progress=True)
189 elif os.path.exists(weights):
190 _, state_dict = utils.extract_backbone(weights)
191 else:
192 state_dict = get_weight(weights).get_state_dict(progress=True)
193 self.model.encoder.load_state_dict(state_dict)
194
195 # Freeze backbone
196 if self.hparams["freeze_backbone"] and model in ["unet", "deeplabv3+"]:
197 for param in self.model.encoder.parameters():
198 param.requires_grad = False
199
200 # Freeze decoder
201 if self.hparams["freeze_decoder"] and model in ["unet", "deeplabv3+"]:
202 for param in self.model.decoder.parameters():
203 param.requires_grad = False
204
205 def training_step(
206 self, batch: Any, batch_idx: int, dataloader_idx: int = 0
207 ) -> Tensor:
208 """Compute the training loss and additional metrics.
209
210 Args:
211 batch: The output of your DataLoader.
212 batch_idx: Integer displaying index of this batch.
213 dataloader_idx: Index of the current dataloader.
214
215 Returns:
216 The loss tensor.
217 """
218 x = batch["image"]
219 y = batch["mask"]
220 y_hat = self(x)
221 loss: Tensor = self.criterion(y_hat, y)
222 self.log("train_loss", loss)
223 self.train_metrics(y_hat, y)
224 self.log_dict(self.train_metrics)
225 return loss
226
227 def validation_step(
228 self, batch: Any, batch_idx: int, dataloader_idx: int = 0
229 ) -> None:
230 """Compute the validation loss and additional metrics.
231
232 Args:
233 batch: The output of your DataLoader.
234 batch_idx: Integer displaying index of this batch.
235 dataloader_idx: Index of the current dataloader.
236 """
237 x = batch["image"]
238 y = batch["mask"]
239 y_hat = self(x)
240 loss = self.criterion(y_hat, y)
241 self.log("val_loss", loss)
242 self.val_metrics(y_hat, y)
243 self.log_dict(self.val_metrics)
244
245 if (
246 batch_idx < 10
247 and hasattr(self.trainer, "datamodule")
248 and hasattr(self.trainer.datamodule, "plot")
249 and self.logger
250 and hasattr(self.logger, "experiment")
251 and hasattr(self.logger.experiment, "add_figure")
252 ):
253 datamodule = self.trainer.datamodule
254 batch["prediction"] = y_hat.argmax(dim=1)
255 for key in ["image", "mask", "prediction"]:
256 batch[key] = batch[key].cpu()
257 sample = unbind_samples(batch)[0]
258
259 fig: Optional[Figure] = None
260 try:
261 fig = datamodule.plot(sample)
262 except RGBBandsMissingError:
263 pass
264
265 if fig:
266 summary_writer = self.logger.experiment
267 summary_writer.add_figure(
268 f"image/{batch_idx}", fig, global_step=self.global_step
269 )
270 plt.close()
271
272 def test_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> None:
273 """Compute the test loss and additional metrics.
274
275 Args:
276 batch: The output of your DataLoader.
277 batch_idx: Integer displaying index of this batch.
278 dataloader_idx: Index of the current dataloader.
279 """
280 x = batch["image"]
281 y = batch["mask"]
282 y_hat = self(x)
283 loss = self.criterion(y_hat, y)
284 self.log("test_loss", loss)
285 self.test_metrics(y_hat, y)
286 self.log_dict(self.test_metrics)
287
288 def predict_step(
289 self, batch: Any, batch_idx: int, dataloader_idx: int = 0
290 ) -> Tensor:
291 """Compute the predicted class probabilities.
292
293 Args:
294 batch: The output of your DataLoader.
295 batch_idx: Integer displaying index of this batch.
296 dataloader_idx: Index of the current dataloader.
297
298 Returns:
299 Output predicted probabilities.
300 """
301 x = batch["image"]
302 y_hat: Tensor = self(x).softmax(dim=1)
303 return y_hat
304
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/torchgeo/trainers/segmentation.py b/torchgeo/trainers/segmentation.py
--- a/torchgeo/trainers/segmentation.py
+++ b/torchgeo/trainers/segmentation.py
@@ -4,7 +4,6 @@
"""Trainers for semantic segmentation."""
import os
-import warnings
from typing import Any, Optional, Union
import matplotlib.pyplot as plt
@@ -70,9 +69,6 @@
freeze_decoder: Freeze the decoder network to linear probe
the segmentation head.
- Warns:
- UserWarning: When loss='jaccard' and ignore_index is specified.
-
.. versionchanged:: 0.3
*ignore_zeros* was renamed to *ignore_index*.
@@ -87,13 +83,10 @@
The *weights* parameter now supports WeightEnums and checkpoint paths.
*learning_rate* and *learning_rate_schedule_patience* were renamed to
*lr* and *patience*.
- """
- if ignore_index is not None and loss == "jaccard":
- warnings.warn(
- "ignore_index has no effect on training when loss='jaccard'",
- UserWarning,
- )
+ .. versionchanged:: 0.6
+ The *ignore_index* parameter now works for jaccard loss.
+ """
self.weights = weights
super().__init__(ignore="weights")
@@ -111,9 +104,13 @@
ignore_index=ignore_value, weight=self.hparams["class_weights"]
)
elif loss == "jaccard":
- self.criterion = smp.losses.JaccardLoss(
- mode="multiclass", classes=self.hparams["num_classes"]
- )
+ # JaccardLoss requires a list of classes to use instead of a class
+ # index to ignore.
+ classes = [
+ i for i in range(self.hparams["num_classes"]) if i != ignore_index
+ ]
+
+ self.criterion = smp.losses.JaccardLoss(mode="multiclass", classes=classes)
elif loss == "focal":
self.criterion = smp.losses.FocalLoss(
"multiclass", ignore_index=ignore_index, normalized=True
| {"golden_diff": "diff --git a/torchgeo/trainers/segmentation.py b/torchgeo/trainers/segmentation.py\n--- a/torchgeo/trainers/segmentation.py\n+++ b/torchgeo/trainers/segmentation.py\n@@ -4,7 +4,6 @@\n \"\"\"Trainers for semantic segmentation.\"\"\"\n \n import os\n-import warnings\n from typing import Any, Optional, Union\n \n import matplotlib.pyplot as plt\n@@ -70,9 +69,6 @@\n freeze_decoder: Freeze the decoder network to linear probe\n the segmentation head.\n \n- Warns:\n- UserWarning: When loss='jaccard' and ignore_index is specified.\n-\n .. versionchanged:: 0.3\n *ignore_zeros* was renamed to *ignore_index*.\n \n@@ -87,13 +83,10 @@\n The *weights* parameter now supports WeightEnums and checkpoint paths.\n *learning_rate* and *learning_rate_schedule_patience* were renamed to\n *lr* and *patience*.\n- \"\"\"\n- if ignore_index is not None and loss == \"jaccard\":\n- warnings.warn(\n- \"ignore_index has no effect on training when loss='jaccard'\",\n- UserWarning,\n- )\n \n+ .. versionchanged:: 0.6\n+ The *ignore_index* parameter now works for jaccard loss.\n+ \"\"\"\n self.weights = weights\n super().__init__(ignore=\"weights\")\n \n@@ -111,9 +104,13 @@\n ignore_index=ignore_value, weight=self.hparams[\"class_weights\"]\n )\n elif loss == \"jaccard\":\n- self.criterion = smp.losses.JaccardLoss(\n- mode=\"multiclass\", classes=self.hparams[\"num_classes\"]\n- )\n+ # JaccardLoss requires a list of classes to use instead of a class\n+ # index to ignore.\n+ classes = [\n+ i for i in range(self.hparams[\"num_classes\"]) if i != ignore_index\n+ ]\n+\n+ self.criterion = smp.losses.JaccardLoss(mode=\"multiclass\", classes=classes)\n elif loss == \"focal\":\n self.criterion = smp.losses.FocalLoss(\n \"multiclass\", ignore_index=ignore_index, normalized=True\n", "issue": "Add `ignore_index` support for Jaccard Loss\n### Summary\r\n\r\nCurrently, the `SemanticSegmentationTask` recognises the `ignore_index` parameter only when cross entropy and focal loss is used. However, `smp.losses.JaccardLoss` implicitly supports this option via its `classes` parameter, which is currently set to `self.hparams[\"num_classes\"]`. The FR is to adapt what is passed as argument to this parameter so that `ignore_index` can work with all currently supported losses.\r\n\r\n### Rationale\r\n\r\nThe Jaccard index is a common semantic segmentation metric which also makes for a decent loss function. Because of the way it is defined, it is important to ignore overly dominant classes (e.g., the background when classifying building rooftops); otherwise performance can be hindered significantly.\r\n\r\n### Implementation\r\n\r\nChange the `classes` argument of `smp.losses.JaccardLoss` in `SemanticSegmentationTask.configure_losses` from `self.hparams[\"num_classes\"]` to `list(set(list(range(self.hparams[\"num_classes\"]))).difference(set([ignore_index])))`, assuming that `ignore_index` is not `None`.\r\n\r\n### Alternatives\r\n\r\n_No response_\r\n\r\n### Additional information\r\n\r\n_No response_\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Trainers for semantic segmentation.\"\"\"\n\nimport os\nimport warnings\nfrom typing import Any, Optional, Union\n\nimport matplotlib.pyplot as plt\nimport segmentation_models_pytorch as smp\nimport torch.nn as nn\nfrom matplotlib.figure import Figure\nfrom torch import Tensor\nfrom torchmetrics import MetricCollection\nfrom torchmetrics.classification import MulticlassAccuracy, MulticlassJaccardIndex\nfrom torchvision.models._api import WeightsEnum\n\nfrom ..datasets import RGBBandsMissingError, unbind_samples\nfrom ..models import FCN, get_weight\nfrom . import utils\nfrom .base import BaseTask\n\n\nclass SemanticSegmentationTask(BaseTask):\n \"\"\"Semantic Segmentation.\"\"\"\n\n def __init__(\n self,\n model: str = \"unet\",\n backbone: str = \"resnet50\",\n weights: Optional[Union[WeightsEnum, str, bool]] = None,\n in_channels: int = 3,\n num_classes: int = 1000,\n num_filters: int = 3,\n loss: str = \"ce\",\n class_weights: Optional[Tensor] = None,\n ignore_index: Optional[int] = None,\n lr: float = 1e-3,\n patience: int = 10,\n freeze_backbone: bool = False,\n freeze_decoder: bool = False,\n ) -> None:\n \"\"\"Inititalize a new SemanticSegmentationTask instance.\n\n Args:\n model: Name of the\n `smp <https://smp.readthedocs.io/en/latest/models.html>`__ model to use.\n backbone: Name of the `timm\n <https://smp.readthedocs.io/en/latest/encoders_timm.html>`__ or `smp\n <https://smp.readthedocs.io/en/latest/encoders.html>`__ backbone to use.\n weights: Initial model weights. Either a weight enum, the string\n representation of a weight enum, True for ImageNet weights, False or\n None for random weights, or the path to a saved model state dict. FCN\n model does not support pretrained weights. Pretrained ViT weight enums\n are not supported yet.\n in_channels: Number of input channels to model.\n num_classes: Number of prediction classes.\n num_filters: Number of filters. Only applicable when model='fcn'.\n loss: Name of the loss function, currently supports\n 'ce', 'jaccard' or 'focal' loss.\n class_weights: Optional rescaling weight given to each\n class and used with 'ce' loss.\n ignore_index: Optional integer class index to ignore in the loss and\n metrics.\n lr: Learning rate for optimizer.\n patience: Patience for learning rate scheduler.\n freeze_backbone: Freeze the backbone network to fine-tune the\n decoder and segmentation head.\n freeze_decoder: Freeze the decoder network to linear probe\n the segmentation head.\n\n Warns:\n UserWarning: When loss='jaccard' and ignore_index is specified.\n\n .. versionchanged:: 0.3\n *ignore_zeros* was renamed to *ignore_index*.\n\n .. versionchanged:: 0.4\n *segmentation_model*, *encoder_name*, and *encoder_weights*\n were renamed to *model*, *backbone*, and *weights*.\n\n .. versionadded: 0.5\n The *class_weights*, *freeze_backbone*, and *freeze_decoder* parameters.\n\n .. 
versionchanged:: 0.5\n The *weights* parameter now supports WeightEnums and checkpoint paths.\n *learning_rate* and *learning_rate_schedule_patience* were renamed to\n *lr* and *patience*.\n \"\"\"\n if ignore_index is not None and loss == \"jaccard\":\n warnings.warn(\n \"ignore_index has no effect on training when loss='jaccard'\",\n UserWarning,\n )\n\n self.weights = weights\n super().__init__(ignore=\"weights\")\n\n def configure_losses(self) -> None:\n \"\"\"Initialize the loss criterion.\n\n Raises:\n ValueError: If *loss* is invalid.\n \"\"\"\n loss: str = self.hparams[\"loss\"]\n ignore_index = self.hparams[\"ignore_index\"]\n if loss == \"ce\":\n ignore_value = -1000 if ignore_index is None else ignore_index\n self.criterion = nn.CrossEntropyLoss(\n ignore_index=ignore_value, weight=self.hparams[\"class_weights\"]\n )\n elif loss == \"jaccard\":\n self.criterion = smp.losses.JaccardLoss(\n mode=\"multiclass\", classes=self.hparams[\"num_classes\"]\n )\n elif loss == \"focal\":\n self.criterion = smp.losses.FocalLoss(\n \"multiclass\", ignore_index=ignore_index, normalized=True\n )\n else:\n raise ValueError(\n f\"Loss type '{loss}' is not valid. \"\n \"Currently, supports 'ce', 'jaccard' or 'focal' loss.\"\n )\n\n def configure_metrics(self) -> None:\n \"\"\"Initialize the performance metrics.\"\"\"\n num_classes: int = self.hparams[\"num_classes\"]\n ignore_index: Optional[int] = self.hparams[\"ignore_index\"]\n metrics = MetricCollection(\n [\n MulticlassAccuracy(\n num_classes=num_classes,\n ignore_index=ignore_index,\n multidim_average=\"global\",\n average=\"micro\",\n ),\n MulticlassJaccardIndex(\n num_classes=num_classes, ignore_index=ignore_index, average=\"micro\"\n ),\n ]\n )\n self.train_metrics = metrics.clone(prefix=\"train_\")\n self.val_metrics = metrics.clone(prefix=\"val_\")\n self.test_metrics = metrics.clone(prefix=\"test_\")\n\n def configure_models(self) -> None:\n \"\"\"Initialize the model.\n\n Raises:\n ValueError: If *model* is invalid.\n \"\"\"\n model: str = self.hparams[\"model\"]\n backbone: str = self.hparams[\"backbone\"]\n weights = self.weights\n in_channels: int = self.hparams[\"in_channels\"]\n num_classes: int = self.hparams[\"num_classes\"]\n num_filters: int = self.hparams[\"num_filters\"]\n\n if model == \"unet\":\n self.model = smp.Unet(\n encoder_name=backbone,\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=in_channels,\n classes=num_classes,\n )\n elif model == \"deeplabv3+\":\n self.model = smp.DeepLabV3Plus(\n encoder_name=backbone,\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=in_channels,\n classes=num_classes,\n )\n elif model == \"fcn\":\n self.model = FCN(\n in_channels=in_channels, classes=num_classes, num_filters=num_filters\n )\n else:\n raise ValueError(\n f\"Model type '{model}' is not valid. 
\"\n \"Currently, only supports 'unet', 'deeplabv3+' and 'fcn'.\"\n )\n\n if model != \"fcn\":\n if weights and weights is not True:\n if isinstance(weights, WeightsEnum):\n state_dict = weights.get_state_dict(progress=True)\n elif os.path.exists(weights):\n _, state_dict = utils.extract_backbone(weights)\n else:\n state_dict = get_weight(weights).get_state_dict(progress=True)\n self.model.encoder.load_state_dict(state_dict)\n\n # Freeze backbone\n if self.hparams[\"freeze_backbone\"] and model in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.encoder.parameters():\n param.requires_grad = False\n\n # Freeze decoder\n if self.hparams[\"freeze_decoder\"] and model in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.decoder.parameters():\n param.requires_grad = False\n\n def training_step(\n self, batch: Any, batch_idx: int, dataloader_idx: int = 0\n ) -> Tensor:\n \"\"\"Compute the training loss and additional metrics.\n\n Args:\n batch: The output of your DataLoader.\n batch_idx: Integer displaying index of this batch.\n dataloader_idx: Index of the current dataloader.\n\n Returns:\n The loss tensor.\n \"\"\"\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n loss: Tensor = self.criterion(y_hat, y)\n self.log(\"train_loss\", loss)\n self.train_metrics(y_hat, y)\n self.log_dict(self.train_metrics)\n return loss\n\n def validation_step(\n self, batch: Any, batch_idx: int, dataloader_idx: int = 0\n ) -> None:\n \"\"\"Compute the validation loss and additional metrics.\n\n Args:\n batch: The output of your DataLoader.\n batch_idx: Integer displaying index of this batch.\n dataloader_idx: Index of the current dataloader.\n \"\"\"\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n loss = self.criterion(y_hat, y)\n self.log(\"val_loss\", loss)\n self.val_metrics(y_hat, y)\n self.log_dict(self.val_metrics)\n\n if (\n batch_idx < 10\n and hasattr(self.trainer, \"datamodule\")\n and hasattr(self.trainer.datamodule, \"plot\")\n and self.logger\n and hasattr(self.logger, \"experiment\")\n and hasattr(self.logger.experiment, \"add_figure\")\n ):\n datamodule = self.trainer.datamodule\n batch[\"prediction\"] = y_hat.argmax(dim=1)\n for key in [\"image\", \"mask\", \"prediction\"]:\n batch[key] = batch[key].cpu()\n sample = unbind_samples(batch)[0]\n\n fig: Optional[Figure] = None\n try:\n fig = datamodule.plot(sample)\n except RGBBandsMissingError:\n pass\n\n if fig:\n summary_writer = self.logger.experiment\n summary_writer.add_figure(\n f\"image/{batch_idx}\", fig, global_step=self.global_step\n )\n plt.close()\n\n def test_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> None:\n \"\"\"Compute the test loss and additional metrics.\n\n Args:\n batch: The output of your DataLoader.\n batch_idx: Integer displaying index of this batch.\n dataloader_idx: Index of the current dataloader.\n \"\"\"\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n loss = self.criterion(y_hat, y)\n self.log(\"test_loss\", loss)\n self.test_metrics(y_hat, y)\n self.log_dict(self.test_metrics)\n\n def predict_step(\n self, batch: Any, batch_idx: int, dataloader_idx: int = 0\n ) -> Tensor:\n \"\"\"Compute the predicted class probabilities.\n\n Args:\n batch: The output of your DataLoader.\n batch_idx: Integer displaying index of this batch.\n dataloader_idx: Index of the current dataloader.\n\n Returns:\n Output predicted probabilities.\n \"\"\"\n x = batch[\"image\"]\n y_hat: Tensor = self(x).softmax(dim=1)\n return y_hat\n", "path": 
"torchgeo/trainers/segmentation.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License.\n\n\"\"\"Trainers for semantic segmentation.\"\"\"\n\nimport os\nfrom typing import Any, Optional, Union\n\nimport matplotlib.pyplot as plt\nimport segmentation_models_pytorch as smp\nimport torch.nn as nn\nfrom matplotlib.figure import Figure\nfrom torch import Tensor\nfrom torchmetrics import MetricCollection\nfrom torchmetrics.classification import MulticlassAccuracy, MulticlassJaccardIndex\nfrom torchvision.models._api import WeightsEnum\n\nfrom ..datasets import RGBBandsMissingError, unbind_samples\nfrom ..models import FCN, get_weight\nfrom . import utils\nfrom .base import BaseTask\n\n\nclass SemanticSegmentationTask(BaseTask):\n \"\"\"Semantic Segmentation.\"\"\"\n\n def __init__(\n self,\n model: str = \"unet\",\n backbone: str = \"resnet50\",\n weights: Optional[Union[WeightsEnum, str, bool]] = None,\n in_channels: int = 3,\n num_classes: int = 1000,\n num_filters: int = 3,\n loss: str = \"ce\",\n class_weights: Optional[Tensor] = None,\n ignore_index: Optional[int] = None,\n lr: float = 1e-3,\n patience: int = 10,\n freeze_backbone: bool = False,\n freeze_decoder: bool = False,\n ) -> None:\n \"\"\"Inititalize a new SemanticSegmentationTask instance.\n\n Args:\n model: Name of the\n `smp <https://smp.readthedocs.io/en/latest/models.html>`__ model to use.\n backbone: Name of the `timm\n <https://smp.readthedocs.io/en/latest/encoders_timm.html>`__ or `smp\n <https://smp.readthedocs.io/en/latest/encoders.html>`__ backbone to use.\n weights: Initial model weights. Either a weight enum, the string\n representation of a weight enum, True for ImageNet weights, False or\n None for random weights, or the path to a saved model state dict. FCN\n model does not support pretrained weights. Pretrained ViT weight enums\n are not supported yet.\n in_channels: Number of input channels to model.\n num_classes: Number of prediction classes.\n num_filters: Number of filters. Only applicable when model='fcn'.\n loss: Name of the loss function, currently supports\n 'ce', 'jaccard' or 'focal' loss.\n class_weights: Optional rescaling weight given to each\n class and used with 'ce' loss.\n ignore_index: Optional integer class index to ignore in the loss and\n metrics.\n lr: Learning rate for optimizer.\n patience: Patience for learning rate scheduler.\n freeze_backbone: Freeze the backbone network to fine-tune the\n decoder and segmentation head.\n freeze_decoder: Freeze the decoder network to linear probe\n the segmentation head.\n\n .. versionchanged:: 0.3\n *ignore_zeros* was renamed to *ignore_index*.\n\n .. versionchanged:: 0.4\n *segmentation_model*, *encoder_name*, and *encoder_weights*\n were renamed to *model*, *backbone*, and *weights*.\n\n .. versionadded: 0.5\n The *class_weights*, *freeze_backbone*, and *freeze_decoder* parameters.\n\n .. versionchanged:: 0.5\n The *weights* parameter now supports WeightEnums and checkpoint paths.\n *learning_rate* and *learning_rate_schedule_patience* were renamed to\n *lr* and *patience*.\n\n .. 
versionchanged:: 0.6\n The *ignore_index* parameter now works for jaccard loss.\n \"\"\"\n self.weights = weights\n super().__init__(ignore=\"weights\")\n\n def configure_losses(self) -> None:\n \"\"\"Initialize the loss criterion.\n\n Raises:\n ValueError: If *loss* is invalid.\n \"\"\"\n loss: str = self.hparams[\"loss\"]\n ignore_index = self.hparams[\"ignore_index\"]\n if loss == \"ce\":\n ignore_value = -1000 if ignore_index is None else ignore_index\n self.criterion = nn.CrossEntropyLoss(\n ignore_index=ignore_value, weight=self.hparams[\"class_weights\"]\n )\n elif loss == \"jaccard\":\n # JaccardLoss requires a list of classes to use instead of a class\n # index to ignore.\n classes = [\n i for i in range(self.hparams[\"num_classes\"]) if i != ignore_index\n ]\n\n self.criterion = smp.losses.JaccardLoss(mode=\"multiclass\", classes=classes)\n elif loss == \"focal\":\n self.criterion = smp.losses.FocalLoss(\n \"multiclass\", ignore_index=ignore_index, normalized=True\n )\n else:\n raise ValueError(\n f\"Loss type '{loss}' is not valid. \"\n \"Currently, supports 'ce', 'jaccard' or 'focal' loss.\"\n )\n\n def configure_metrics(self) -> None:\n \"\"\"Initialize the performance metrics.\"\"\"\n num_classes: int = self.hparams[\"num_classes\"]\n ignore_index: Optional[int] = self.hparams[\"ignore_index\"]\n metrics = MetricCollection(\n [\n MulticlassAccuracy(\n num_classes=num_classes,\n ignore_index=ignore_index,\n multidim_average=\"global\",\n average=\"micro\",\n ),\n MulticlassJaccardIndex(\n num_classes=num_classes, ignore_index=ignore_index, average=\"micro\"\n ),\n ]\n )\n self.train_metrics = metrics.clone(prefix=\"train_\")\n self.val_metrics = metrics.clone(prefix=\"val_\")\n self.test_metrics = metrics.clone(prefix=\"test_\")\n\n def configure_models(self) -> None:\n \"\"\"Initialize the model.\n\n Raises:\n ValueError: If *model* is invalid.\n \"\"\"\n model: str = self.hparams[\"model\"]\n backbone: str = self.hparams[\"backbone\"]\n weights = self.weights\n in_channels: int = self.hparams[\"in_channels\"]\n num_classes: int = self.hparams[\"num_classes\"]\n num_filters: int = self.hparams[\"num_filters\"]\n\n if model == \"unet\":\n self.model = smp.Unet(\n encoder_name=backbone,\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=in_channels,\n classes=num_classes,\n )\n elif model == \"deeplabv3+\":\n self.model = smp.DeepLabV3Plus(\n encoder_name=backbone,\n encoder_weights=\"imagenet\" if weights is True else None,\n in_channels=in_channels,\n classes=num_classes,\n )\n elif model == \"fcn\":\n self.model = FCN(\n in_channels=in_channels, classes=num_classes, num_filters=num_filters\n )\n else:\n raise ValueError(\n f\"Model type '{model}' is not valid. 
\"\n \"Currently, only supports 'unet', 'deeplabv3+' and 'fcn'.\"\n )\n\n if model != \"fcn\":\n if weights and weights is not True:\n if isinstance(weights, WeightsEnum):\n state_dict = weights.get_state_dict(progress=True)\n elif os.path.exists(weights):\n _, state_dict = utils.extract_backbone(weights)\n else:\n state_dict = get_weight(weights).get_state_dict(progress=True)\n self.model.encoder.load_state_dict(state_dict)\n\n # Freeze backbone\n if self.hparams[\"freeze_backbone\"] and model in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.encoder.parameters():\n param.requires_grad = False\n\n # Freeze decoder\n if self.hparams[\"freeze_decoder\"] and model in [\"unet\", \"deeplabv3+\"]:\n for param in self.model.decoder.parameters():\n param.requires_grad = False\n\n def training_step(\n self, batch: Any, batch_idx: int, dataloader_idx: int = 0\n ) -> Tensor:\n \"\"\"Compute the training loss and additional metrics.\n\n Args:\n batch: The output of your DataLoader.\n batch_idx: Integer displaying index of this batch.\n dataloader_idx: Index of the current dataloader.\n\n Returns:\n The loss tensor.\n \"\"\"\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n loss: Tensor = self.criterion(y_hat, y)\n self.log(\"train_loss\", loss)\n self.train_metrics(y_hat, y)\n self.log_dict(self.train_metrics)\n return loss\n\n def validation_step(\n self, batch: Any, batch_idx: int, dataloader_idx: int = 0\n ) -> None:\n \"\"\"Compute the validation loss and additional metrics.\n\n Args:\n batch: The output of your DataLoader.\n batch_idx: Integer displaying index of this batch.\n dataloader_idx: Index of the current dataloader.\n \"\"\"\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n loss = self.criterion(y_hat, y)\n self.log(\"val_loss\", loss)\n self.val_metrics(y_hat, y)\n self.log_dict(self.val_metrics)\n\n if (\n batch_idx < 10\n and hasattr(self.trainer, \"datamodule\")\n and hasattr(self.trainer.datamodule, \"plot\")\n and self.logger\n and hasattr(self.logger, \"experiment\")\n and hasattr(self.logger.experiment, \"add_figure\")\n ):\n datamodule = self.trainer.datamodule\n batch[\"prediction\"] = y_hat.argmax(dim=1)\n for key in [\"image\", \"mask\", \"prediction\"]:\n batch[key] = batch[key].cpu()\n sample = unbind_samples(batch)[0]\n\n fig: Optional[Figure] = None\n try:\n fig = datamodule.plot(sample)\n except RGBBandsMissingError:\n pass\n\n if fig:\n summary_writer = self.logger.experiment\n summary_writer.add_figure(\n f\"image/{batch_idx}\", fig, global_step=self.global_step\n )\n plt.close()\n\n def test_step(self, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> None:\n \"\"\"Compute the test loss and additional metrics.\n\n Args:\n batch: The output of your DataLoader.\n batch_idx: Integer displaying index of this batch.\n dataloader_idx: Index of the current dataloader.\n \"\"\"\n x = batch[\"image\"]\n y = batch[\"mask\"]\n y_hat = self(x)\n loss = self.criterion(y_hat, y)\n self.log(\"test_loss\", loss)\n self.test_metrics(y_hat, y)\n self.log_dict(self.test_metrics)\n\n def predict_step(\n self, batch: Any, batch_idx: int, dataloader_idx: int = 0\n ) -> Tensor:\n \"\"\"Compute the predicted class probabilities.\n\n Args:\n batch: The output of your DataLoader.\n batch_idx: Integer displaying index of this batch.\n dataloader_idx: Index of the current dataloader.\n\n Returns:\n Output predicted probabilities.\n \"\"\"\n x = batch[\"image\"]\n y_hat: Tensor = self(x).softmax(dim=1)\n return y_hat\n", "path": 
"torchgeo/trainers/segmentation.py"}]} | 3,773 | 503 |
gh_patches_debug_43343 | rasdani/github-patches | git_diff | nonebot__nonebot2-947 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Bug: MessageTemplate.format incorrectly joins message segments as text
**Describe the problem:**
`MessageTemplate.format` incorrectly concatenates non-text message segments into text-type segments.
**How to reproduce?**
```python
>>> from nonebot.adapters.onebot.v11 import Message, MessageSegment
>>> Message.template("{}{}").format(MessageSegment.image("file:///"), "hello")
[MessageSegment(type='text', data={'text': '[CQ:image,file=file:///,cache=true,proxy=true]'}), MessageSegment(type='text', data={'text': 'hello'})]
```
**Expected result**
```python
>>> from nonebot.adapters.onebot.v11 import Message, MessageSegment
>>> Message.template("{}{}").format(MessageSegment.image("file:///"), "hello")
[MessageSegment(type='image', data={'file': 'file:///', 'type': None, 'cache': 'true', 'proxy': 'true', 'timeout': None}), MessageSegment(type='text', data={'text': 'hello'})]
```
**Environment info:**
- OS: Windows 10
- Python Version: 3.9.6
- Nonebot Version: 2.0.0-beta2
**Screenshots or logs**

**Remarks**
I wrote a fix myself (stability not guaranteed).
In [`template.py`](https://github.com/nonebot/nonebot2/blob/master/nonebot/internal/adapter/template.py), replace the original
```python
formatted_text = self.format_field(obj, str(format_control))
results.append(formatted_text)
```
with
```python
from .message import MessageSegment
if isinstance(obj, MessageSegment):
    results.append(obj)
else:
    formatted_text = self.format_field(obj, str(format_control))
    results.append(formatted_text)
```
The behaviour after the fix is shown below:

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nonebot/internal/adapter/template.py`
Content:
```
1 import functools
2 from string import Formatter
3 from typing import (
4 TYPE_CHECKING,
5 Any,
6 Set,
7 Dict,
8 List,
9 Type,
10 Tuple,
11 Union,
12 Generic,
13 Mapping,
14 TypeVar,
15 Callable,
16 Optional,
17 Sequence,
18 cast,
19 overload,
20 )
21
22 if TYPE_CHECKING:
23 from .message import Message, MessageSegment
24
25 TM = TypeVar("TM", bound="Message")
26 TF = TypeVar("TF", str, "Message")
27
28 FormatSpecFunc = Callable[[Any], str]
29 FormatSpecFunc_T = TypeVar("FormatSpecFunc_T", bound=FormatSpecFunc)
30
31
32 class MessageTemplate(Formatter, Generic[TF]):
33 """消息模板格式化实现类。
34
35 参数:
36 template: 模板
37 factory: 消息类型工厂,默认为 `str`
38 """
39
40 @overload
41 def __init__(
42 self: "MessageTemplate[str]", template: str, factory: Type[str] = str
43 ) -> None:
44 ...
45
46 @overload
47 def __init__(
48 self: "MessageTemplate[TM]", template: Union[str, TM], factory: Type[TM]
49 ) -> None:
50 ...
51
52 def __init__(self, template, factory=str) -> None:
53 self.template: TF = template
54 self.factory: Type[TF] = factory
55 self.format_specs: Dict[str, FormatSpecFunc] = {}
56
57 def add_format_spec(
58 self, spec: FormatSpecFunc_T, name: Optional[str] = None
59 ) -> FormatSpecFunc_T:
60 name = name or spec.__name__
61 if name in self.format_specs:
62 raise ValueError(f"Format spec {name} already exists!")
63 self.format_specs[name] = spec
64 return spec
65
66 def format(self, *args, **kwargs):
67 """根据传入参数和模板生成消息对象"""
68 return self._format(args, kwargs)
69
70 def format_map(self, mapping: Mapping[str, Any]) -> TF:
71 """根据传入字典和模板生成消息对象, 在传入字段名不是有效标识符时有用"""
72 return self._format([], mapping)
73
74 def _format(self, args: Sequence[Any], kwargs: Mapping[str, Any]) -> TF:
75 msg = self.factory()
76 if isinstance(self.template, str):
77 msg += self.vformat(self.template, args, kwargs)
78 elif isinstance(self.template, self.factory):
79 template = cast("Message[MessageSegment]", self.template)
80 for seg in template:
81 msg += self.vformat(str(seg), args, kwargs) if seg.is_text() else seg
82 else:
83 raise TypeError("template must be a string or instance of Message!")
84
85 return msg # type:ignore
86
87 def vformat(
88 self, format_string: str, args: Sequence[Any], kwargs: Mapping[str, Any]
89 ) -> TF:
90 used_args = set()
91 result, _ = self._vformat(format_string, args, kwargs, used_args, 2)
92 self.check_unused_args(list(used_args), args, kwargs)
93 return result
94
95 def _vformat(
96 self,
97 format_string: str,
98 args: Sequence[Any],
99 kwargs: Mapping[str, Any],
100 used_args: Set[Union[int, str]],
101 recursion_depth: int,
102 auto_arg_index: int = 0,
103 ) -> Tuple[TF, int]:
104 if recursion_depth < 0:
105 raise ValueError("Max string recursion exceeded")
106
107 results: List[Any] = [self.factory()]
108
109 for (literal_text, field_name, format_spec, conversion) in self.parse(
110 format_string
111 ):
112
113 # output the literal text
114 if literal_text:
115 results.append(literal_text)
116
117 # if there's a field, output it
118 if field_name is not None:
119 # this is some markup, find the object and do
120 # the formatting
121
122 # handle arg indexing when empty field_names are given.
123 if field_name == "":
124 if auto_arg_index is False:
125 raise ValueError(
126 "cannot switch from manual field specification to "
127 "automatic field numbering"
128 )
129 field_name = str(auto_arg_index)
130 auto_arg_index += 1
131 elif field_name.isdigit():
132 if auto_arg_index:
133 raise ValueError(
134 "cannot switch from manual field specification to "
135 "automatic field numbering"
136 )
137 # disable auto arg incrementing, if it gets
138 # used later on, then an exception will be raised
139 auto_arg_index = False
140
141 # given the field_name, find the object it references
142 # and the argument it came from
143 obj, arg_used = self.get_field(field_name, args, kwargs)
144 used_args.add(arg_used)
145
146 assert format_spec is not None
147
148 # do any conversion on the resulting object
149 obj = self.convert_field(obj, conversion) if conversion else obj
150
151 # expand the format spec, if needed
152 format_control, auto_arg_index = self._vformat(
153 format_spec,
154 args,
155 kwargs,
156 used_args,
157 recursion_depth - 1,
158 auto_arg_index,
159 )
160
161 # format the object and append to the result
162 formatted_text = self.format_field(obj, str(format_control))
163 results.append(formatted_text)
164
165 return functools.reduce(self._add, results), auto_arg_index
166
167 def format_field(self, value: Any, format_spec: str) -> Any:
168 formatter: Optional[FormatSpecFunc] = self.format_specs.get(format_spec)
169 if formatter is None and not issubclass(self.factory, str):
170 segment_class: Type["MessageSegment"] = self.factory.get_segment_class()
171 method = getattr(segment_class, format_spec, None)
172 if callable(method) and not cast(str, method.__name__).startswith("_"):
173 formatter = getattr(segment_class, format_spec)
174 return (
175 super().format_field(value, format_spec)
176 if formatter is None
177 else formatter(value)
178 )
179
180 def _add(self, a: Any, b: Any) -> Any:
181 try:
182 return a + b
183 except TypeError:
184 return a + str(b)
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/nonebot/internal/adapter/template.py b/nonebot/internal/adapter/template.py
--- a/nonebot/internal/adapter/template.py
+++ b/nonebot/internal/adapter/template.py
@@ -49,7 +49,9 @@
) -> None:
...
- def __init__(self, template, factory=str) -> None:
+ def __init__( # type:ignore
+ self, template, factory=str
+ ) -> None: # TODO: fix type hint here
self.template: TF = template
self.factory: Type[TF] = factory
self.format_specs: Dict[str, FormatSpecFunc] = {}
@@ -72,25 +74,37 @@
return self._format([], mapping)
def _format(self, args: Sequence[Any], kwargs: Mapping[str, Any]) -> TF:
- msg = self.factory()
+ full_message = self.factory()
+ used_args, arg_index = set(), 0
+
if isinstance(self.template, str):
- msg += self.vformat(self.template, args, kwargs)
+ msg, arg_index = self._vformat(
+ self.template, args, kwargs, used_args, arg_index
+ )
+ full_message += msg
elif isinstance(self.template, self.factory):
template = cast("Message[MessageSegment]", self.template)
for seg in template:
- msg += self.vformat(str(seg), args, kwargs) if seg.is_text() else seg
+ if not seg.is_text():
+ full_message += seg
+ else:
+ msg, arg_index = self._vformat(
+ str(seg), args, kwargs, used_args, arg_index
+ )
+ full_message += msg
else:
raise TypeError("template must be a string or instance of Message!")
- return msg # type:ignore
+ self.check_unused_args(list(used_args), args, kwargs)
+ return cast(TF, full_message)
def vformat(
- self, format_string: str, args: Sequence[Any], kwargs: Mapping[str, Any]
+ self,
+ format_string: str,
+ args: Sequence[Any],
+ kwargs: Mapping[str, Any],
) -> TF:
- used_args = set()
- result, _ = self._vformat(format_string, args, kwargs, used_args, 2)
- self.check_unused_args(list(used_args), args, kwargs)
- return result
+ raise NotImplementedError("`vformat` has merged into `_format`")
def _vformat(
self,
@@ -98,12 +112,8 @@
args: Sequence[Any],
kwargs: Mapping[str, Any],
used_args: Set[Union[int, str]],
- recursion_depth: int,
auto_arg_index: int = 0,
) -> Tuple[TF, int]:
- if recursion_depth < 0:
- raise ValueError("Max string recursion exceeded")
-
results: List[Any] = [self.factory()]
for (literal_text, field_name, format_spec, conversion) in self.parse(
@@ -143,23 +153,13 @@
obj, arg_used = self.get_field(field_name, args, kwargs)
used_args.add(arg_used)
- assert format_spec is not None
-
# do any conversion on the resulting object
obj = self.convert_field(obj, conversion) if conversion else obj
- # expand the format spec, if needed
- format_control, auto_arg_index = self._vformat(
- format_spec,
- args,
- kwargs,
- used_args,
- recursion_depth - 1,
- auto_arg_index,
- )
-
# format the object and append to the result
- formatted_text = self.format_field(obj, str(format_control))
+ formatted_text = (
+ self.format_field(obj, format_spec) if format_spec else obj
+ )
results.append(formatted_text)
return functools.reduce(self._add, results), auto_arg_index
| {"golden_diff": "diff --git a/nonebot/internal/adapter/template.py b/nonebot/internal/adapter/template.py\n--- a/nonebot/internal/adapter/template.py\n+++ b/nonebot/internal/adapter/template.py\n@@ -49,7 +49,9 @@\n ) -> None:\n ...\n \n- def __init__(self, template, factory=str) -> None:\n+ def __init__( # type:ignore\n+ self, template, factory=str\n+ ) -> None: # TODO: fix type hint here\n self.template: TF = template\n self.factory: Type[TF] = factory\n self.format_specs: Dict[str, FormatSpecFunc] = {}\n@@ -72,25 +74,37 @@\n return self._format([], mapping)\n \n def _format(self, args: Sequence[Any], kwargs: Mapping[str, Any]) -> TF:\n- msg = self.factory()\n+ full_message = self.factory()\n+ used_args, arg_index = set(), 0\n+\n if isinstance(self.template, str):\n- msg += self.vformat(self.template, args, kwargs)\n+ msg, arg_index = self._vformat(\n+ self.template, args, kwargs, used_args, arg_index\n+ )\n+ full_message += msg\n elif isinstance(self.template, self.factory):\n template = cast(\"Message[MessageSegment]\", self.template)\n for seg in template:\n- msg += self.vformat(str(seg), args, kwargs) if seg.is_text() else seg\n+ if not seg.is_text():\n+ full_message += seg\n+ else:\n+ msg, arg_index = self._vformat(\n+ str(seg), args, kwargs, used_args, arg_index\n+ )\n+ full_message += msg\n else:\n raise TypeError(\"template must be a string or instance of Message!\")\n \n- return msg # type:ignore\n+ self.check_unused_args(list(used_args), args, kwargs)\n+ return cast(TF, full_message)\n \n def vformat(\n- self, format_string: str, args: Sequence[Any], kwargs: Mapping[str, Any]\n+ self,\n+ format_string: str,\n+ args: Sequence[Any],\n+ kwargs: Mapping[str, Any],\n ) -> TF:\n- used_args = set()\n- result, _ = self._vformat(format_string, args, kwargs, used_args, 2)\n- self.check_unused_args(list(used_args), args, kwargs)\n- return result\n+ raise NotImplementedError(\"`vformat` has merged into `_format`\")\n \n def _vformat(\n self,\n@@ -98,12 +112,8 @@\n args: Sequence[Any],\n kwargs: Mapping[str, Any],\n used_args: Set[Union[int, str]],\n- recursion_depth: int,\n auto_arg_index: int = 0,\n ) -> Tuple[TF, int]:\n- if recursion_depth < 0:\n- raise ValueError(\"Max string recursion exceeded\")\n-\n results: List[Any] = [self.factory()]\n \n for (literal_text, field_name, format_spec, conversion) in self.parse(\n@@ -143,23 +153,13 @@\n obj, arg_used = self.get_field(field_name, args, kwargs)\n used_args.add(arg_used)\n \n- assert format_spec is not None\n-\n # do any conversion on the resulting object\n obj = self.convert_field(obj, conversion) if conversion else obj\n \n- # expand the format spec, if needed\n- format_control, auto_arg_index = self._vformat(\n- format_spec,\n- args,\n- kwargs,\n- used_args,\n- recursion_depth - 1,\n- auto_arg_index,\n- )\n-\n # format the object and append to the result\n- formatted_text = self.format_field(obj, str(format_control))\n+ formatted_text = (\n+ self.format_field(obj, format_spec) if format_spec else obj\n+ )\n results.append(formatted_text)\n \n return functools.reduce(self._add, results), auto_arg_index\n", "issue": "Bug: MessageTemplate.format \u5c06\u6d88\u606f\u6bb5\u9519\u8bef\u62fc\u63a5\u4e3a\u6587\u672c\n**\u63cf\u8ff0\u95ee\u9898\uff1a**\r\n\r\n`MessageTemplate.format` \u5c06\u975e\u6587\u672c\u7c7b\u578b\u6d88\u606f\u6bb5\u9519\u8bef\u62fc\u63a5\u4e3a\u6587\u672c\u7c7b\u578b\r\n\r\n**\u5982\u4f55\u590d\u73b0\uff1f**\r\n\r\n```python\r\n>>> from nonebot.adapters.onebot.v11 import Message, MessageSegment\r\n>>> 
Message.template(\"{}{}\").format(MessageSegment.image(\"file:///\"), \"hello\")\r\n[MessageSegment(type='text', data={'text': '[CQ:image,file=file:///,cache=true,proxy=true]'}), MessageSegment(type='text', data={'text': 'hello'})]\r\n```\r\n\r\n**\u671f\u671b\u7684\u7ed3\u679c**\r\n\r\n```python\r\n>>> from nonebot.adapters.onebot.v11 import Message, MessageSegment\r\n>>> Message.template(\"{}{}\").format(MessageSegment.image(\"file:///\"), \"hello\")\r\n[MessageSegment(type='image', data={'file': 'file:///', 'type': None, 'cache': 'true', 'proxy': 'true', 'timeout': None}), MessageSegment(type='text', data={'text': 'hello'})]\r\n```\r\n\r\n**\u73af\u5883\u4fe1\u606f\uff1a**\r\n\r\n - OS: Windows 10\r\n - Python Version: 3.9.6\r\n - Nonebot Version: 2.0.0-beta2\r\n\r\n**\u622a\u56fe\u6216\u65e5\u5fd7**\r\n\r\n\r\n\r\n**\u5907\u6ce8**\r\n\r\n\u6211\u81ea\u5df1\u5199\u4e86\u4e00\u6bb5\u4fee\u590d\u4ee3\u7801\uff08\u4e0d\u4fdd\u8bc1\u7a33\u5b9a\u6027\uff09\r\n\r\n[`template.py`](https://github.com/nonebot/nonebot2/blob/master/nonebot/internal/adapter/template.py)\u5c06\u539f\u6765\u7684\r\n\r\n```python\r\nformatted_text = self.format_field(obj, str(format_control))\r\nresults.append(formatted_text)\r\n```\r\n\u66ff\u6362\u4e3a\r\n```python\r\nfrom .message import MessageSegment\r\nif isinstance(obj, MessageSegment):\r\n results.append(obj)\r\nelse:\r\n formatted_text = self.format_field(obj, str(format_control))\r\n results.append(formatted_text)\r\n```\r\n\r\n\u4fee\u590d\u540e\u6548\u679c\u5982\u4e0b\r\n\r\n\nBug: MessageTemplate.format \u5c06\u6d88\u606f\u6bb5\u9519\u8bef\u62fc\u63a5\u4e3a\u6587\u672c\n**\u63cf\u8ff0\u95ee\u9898\uff1a**\r\n\r\n`MessageTemplate.format` \u5c06\u975e\u6587\u672c\u7c7b\u578b\u6d88\u606f\u6bb5\u9519\u8bef\u62fc\u63a5\u4e3a\u6587\u672c\u7c7b\u578b\r\n\r\n**\u5982\u4f55\u590d\u73b0\uff1f**\r\n\r\n```python\r\n>>> from nonebot.adapters.onebot.v11 import Message, MessageSegment\r\n>>> Message.template(\"{}{}\").format(MessageSegment.image(\"file:///\"), \"hello\")\r\n[MessageSegment(type='text', data={'text': '[CQ:image,file=file:///,cache=true,proxy=true]'}), MessageSegment(type='text', data={'text': 'hello'})]\r\n```\r\n\r\n**\u671f\u671b\u7684\u7ed3\u679c**\r\n\r\n```python\r\n>>> from nonebot.adapters.onebot.v11 import Message, MessageSegment\r\n>>> Message.template(\"{}{}\").format(MessageSegment.image(\"file:///\"), \"hello\")\r\n[MessageSegment(type='image', data={'file': 'file:///', 'type': None, 'cache': 'true', 'proxy': 'true', 'timeout': None}), MessageSegment(type='text', data={'text': 'hello'})]\r\n```\r\n\r\n**\u73af\u5883\u4fe1\u606f\uff1a**\r\n\r\n - OS: Windows 10\r\n - Python Version: 3.9.6\r\n - Nonebot Version: 2.0.0-beta2\r\n\r\n**\u622a\u56fe\u6216\u65e5\u5fd7**\r\n\r\n\r\n\r\n**\u5907\u6ce8**\r\n\r\n\u6211\u81ea\u5df1\u5199\u4e86\u4e00\u6bb5\u4fee\u590d\u4ee3\u7801\uff08\u4e0d\u4fdd\u8bc1\u7a33\u5b9a\u6027\uff09\r\n\r\n[`template.py`](https://github.com/nonebot/nonebot2/blob/master/nonebot/internal/adapter/template.py)\u5c06\u539f\u6765\u7684\r\n\r\n```python\r\nformatted_text = self.format_field(obj, str(format_control))\r\nresults.append(formatted_text)\r\n```\r\n\u66ff\u6362\u4e3a\r\n```python\r\nfrom .message import MessageSegment\r\nif isinstance(obj, MessageSegment):\r\n results.append(obj)\r\nelse:\r\n formatted_text = self.format_field(obj, str(format_control))\r\n results.append(formatted_text)\r\n```\r\n\r\n\u4fee\u590d\u540e\u6548\u679c\u5982\u4e0b\r\n\r\n\n", "before_files": [{"content": "import functools\nfrom string import 
Formatter\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Set,\n Dict,\n List,\n Type,\n Tuple,\n Union,\n Generic,\n Mapping,\n TypeVar,\n Callable,\n Optional,\n Sequence,\n cast,\n overload,\n)\n\nif TYPE_CHECKING:\n from .message import Message, MessageSegment\n\nTM = TypeVar(\"TM\", bound=\"Message\")\nTF = TypeVar(\"TF\", str, \"Message\")\n\nFormatSpecFunc = Callable[[Any], str]\nFormatSpecFunc_T = TypeVar(\"FormatSpecFunc_T\", bound=FormatSpecFunc)\n\n\nclass MessageTemplate(Formatter, Generic[TF]):\n \"\"\"\u6d88\u606f\u6a21\u677f\u683c\u5f0f\u5316\u5b9e\u73b0\u7c7b\u3002\n\n \u53c2\u6570:\n template: \u6a21\u677f\n factory: \u6d88\u606f\u7c7b\u578b\u5de5\u5382\uff0c\u9ed8\u8ba4\u4e3a `str`\n \"\"\"\n\n @overload\n def __init__(\n self: \"MessageTemplate[str]\", template: str, factory: Type[str] = str\n ) -> None:\n ...\n\n @overload\n def __init__(\n self: \"MessageTemplate[TM]\", template: Union[str, TM], factory: Type[TM]\n ) -> None:\n ...\n\n def __init__(self, template, factory=str) -> None:\n self.template: TF = template\n self.factory: Type[TF] = factory\n self.format_specs: Dict[str, FormatSpecFunc] = {}\n\n def add_format_spec(\n self, spec: FormatSpecFunc_T, name: Optional[str] = None\n ) -> FormatSpecFunc_T:\n name = name or spec.__name__\n if name in self.format_specs:\n raise ValueError(f\"Format spec {name} already exists!\")\n self.format_specs[name] = spec\n return spec\n\n def format(self, *args, **kwargs):\n \"\"\"\u6839\u636e\u4f20\u5165\u53c2\u6570\u548c\u6a21\u677f\u751f\u6210\u6d88\u606f\u5bf9\u8c61\"\"\"\n return self._format(args, kwargs)\n\n def format_map(self, mapping: Mapping[str, Any]) -> TF:\n \"\"\"\u6839\u636e\u4f20\u5165\u5b57\u5178\u548c\u6a21\u677f\u751f\u6210\u6d88\u606f\u5bf9\u8c61, \u5728\u4f20\u5165\u5b57\u6bb5\u540d\u4e0d\u662f\u6709\u6548\u6807\u8bc6\u7b26\u65f6\u6709\u7528\"\"\"\n return self._format([], mapping)\n\n def _format(self, args: Sequence[Any], kwargs: Mapping[str, Any]) -> TF:\n msg = self.factory()\n if isinstance(self.template, str):\n msg += self.vformat(self.template, args, kwargs)\n elif isinstance(self.template, self.factory):\n template = cast(\"Message[MessageSegment]\", self.template)\n for seg in template:\n msg += self.vformat(str(seg), args, kwargs) if seg.is_text() else seg\n else:\n raise TypeError(\"template must be a string or instance of Message!\")\n\n return msg # type:ignore\n\n def vformat(\n self, format_string: str, args: Sequence[Any], kwargs: Mapping[str, Any]\n ) -> TF:\n used_args = set()\n result, _ = self._vformat(format_string, args, kwargs, used_args, 2)\n self.check_unused_args(list(used_args), args, kwargs)\n return result\n\n def _vformat(\n self,\n format_string: str,\n args: Sequence[Any],\n kwargs: Mapping[str, Any],\n used_args: Set[Union[int, str]],\n recursion_depth: int,\n auto_arg_index: int = 0,\n ) -> Tuple[TF, int]:\n if recursion_depth < 0:\n raise ValueError(\"Max string recursion exceeded\")\n\n results: List[Any] = [self.factory()]\n\n for (literal_text, field_name, format_spec, conversion) in self.parse(\n format_string\n ):\n\n # output the literal text\n if literal_text:\n results.append(literal_text)\n\n # if there's a field, output it\n if field_name is not None:\n # this is some markup, find the object and do\n # the formatting\n\n # handle arg indexing when empty field_names are given.\n if field_name == \"\":\n if auto_arg_index is False:\n raise ValueError(\n \"cannot switch from manual field specification to \"\n \"automatic field numbering\"\n )\n field_name = 
str(auto_arg_index)\n auto_arg_index += 1\n elif field_name.isdigit():\n if auto_arg_index:\n raise ValueError(\n \"cannot switch from manual field specification to \"\n \"automatic field numbering\"\n )\n # disable auto arg incrementing, if it gets\n # used later on, then an exception will be raised\n auto_arg_index = False\n\n # given the field_name, find the object it references\n # and the argument it came from\n obj, arg_used = self.get_field(field_name, args, kwargs)\n used_args.add(arg_used)\n\n assert format_spec is not None\n\n # do any conversion on the resulting object\n obj = self.convert_field(obj, conversion) if conversion else obj\n\n # expand the format spec, if needed\n format_control, auto_arg_index = self._vformat(\n format_spec,\n args,\n kwargs,\n used_args,\n recursion_depth - 1,\n auto_arg_index,\n )\n\n # format the object and append to the result\n formatted_text = self.format_field(obj, str(format_control))\n results.append(formatted_text)\n\n return functools.reduce(self._add, results), auto_arg_index\n\n def format_field(self, value: Any, format_spec: str) -> Any:\n formatter: Optional[FormatSpecFunc] = self.format_specs.get(format_spec)\n if formatter is None and not issubclass(self.factory, str):\n segment_class: Type[\"MessageSegment\"] = self.factory.get_segment_class()\n method = getattr(segment_class, format_spec, None)\n if callable(method) and not cast(str, method.__name__).startswith(\"_\"):\n formatter = getattr(segment_class, format_spec)\n return (\n super().format_field(value, format_spec)\n if formatter is None\n else formatter(value)\n )\n\n def _add(self, a: Any, b: Any) -> Any:\n try:\n return a + b\n except TypeError:\n return a + str(b)\n", "path": "nonebot/internal/adapter/template.py"}], "after_files": [{"content": "import functools\nfrom string import Formatter\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Set,\n Dict,\n List,\n Type,\n Tuple,\n Union,\n Generic,\n Mapping,\n TypeVar,\n Callable,\n Optional,\n Sequence,\n cast,\n overload,\n)\n\nif TYPE_CHECKING:\n from .message import Message, MessageSegment\n\nTM = TypeVar(\"TM\", bound=\"Message\")\nTF = TypeVar(\"TF\", str, \"Message\")\n\nFormatSpecFunc = Callable[[Any], str]\nFormatSpecFunc_T = TypeVar(\"FormatSpecFunc_T\", bound=FormatSpecFunc)\n\n\nclass MessageTemplate(Formatter, Generic[TF]):\n \"\"\"\u6d88\u606f\u6a21\u677f\u683c\u5f0f\u5316\u5b9e\u73b0\u7c7b\u3002\n\n \u53c2\u6570:\n template: \u6a21\u677f\n factory: \u6d88\u606f\u7c7b\u578b\u5de5\u5382\uff0c\u9ed8\u8ba4\u4e3a `str`\n \"\"\"\n\n @overload\n def __init__(\n self: \"MessageTemplate[str]\", template: str, factory: Type[str] = str\n ) -> None:\n ...\n\n @overload\n def __init__(\n self: \"MessageTemplate[TM]\", template: Union[str, TM], factory: Type[TM]\n ) -> None:\n ...\n\n def __init__( # type:ignore\n self, template, factory=str\n ) -> None: # TODO: fix type hint here\n self.template: TF = template\n self.factory: Type[TF] = factory\n self.format_specs: Dict[str, FormatSpecFunc] = {}\n\n def add_format_spec(\n self, spec: FormatSpecFunc_T, name: Optional[str] = None\n ) -> FormatSpecFunc_T:\n name = name or spec.__name__\n if name in self.format_specs:\n raise ValueError(f\"Format spec {name} already exists!\")\n self.format_specs[name] = spec\n return spec\n\n def format(self, *args, **kwargs):\n \"\"\"\u6839\u636e\u4f20\u5165\u53c2\u6570\u548c\u6a21\u677f\u751f\u6210\u6d88\u606f\u5bf9\u8c61\"\"\"\n return self._format(args, kwargs)\n\n def format_map(self, mapping: Mapping[str, Any]) -> TF:\n 
\"\"\"\u6839\u636e\u4f20\u5165\u5b57\u5178\u548c\u6a21\u677f\u751f\u6210\u6d88\u606f\u5bf9\u8c61, \u5728\u4f20\u5165\u5b57\u6bb5\u540d\u4e0d\u662f\u6709\u6548\u6807\u8bc6\u7b26\u65f6\u6709\u7528\"\"\"\n return self._format([], mapping)\n\n def _format(self, args: Sequence[Any], kwargs: Mapping[str, Any]) -> TF:\n full_message = self.factory()\n used_args, arg_index = set(), 0\n\n if isinstance(self.template, str):\n msg, arg_index = self._vformat(\n self.template, args, kwargs, used_args, arg_index\n )\n full_message += msg\n elif isinstance(self.template, self.factory):\n template = cast(\"Message[MessageSegment]\", self.template)\n for seg in template:\n if not seg.is_text():\n full_message += seg\n else:\n msg, arg_index = self._vformat(\n str(seg), args, kwargs, used_args, arg_index\n )\n full_message += msg\n else:\n raise TypeError(\"template must be a string or instance of Message!\")\n\n self.check_unused_args(list(used_args), args, kwargs)\n return cast(TF, full_message)\n\n def vformat(\n self,\n format_string: str,\n args: Sequence[Any],\n kwargs: Mapping[str, Any],\n ) -> TF:\n raise NotImplementedError(\"`vformat` has merged into `_format`\")\n\n def _vformat(\n self,\n format_string: str,\n args: Sequence[Any],\n kwargs: Mapping[str, Any],\n used_args: Set[Union[int, str]],\n auto_arg_index: int = 0,\n ) -> Tuple[TF, int]:\n results: List[Any] = [self.factory()]\n\n for (literal_text, field_name, format_spec, conversion) in self.parse(\n format_string\n ):\n\n # output the literal text\n if literal_text:\n results.append(literal_text)\n\n # if there's a field, output it\n if field_name is not None:\n # this is some markup, find the object and do\n # the formatting\n\n # handle arg indexing when empty field_names are given.\n if field_name == \"\":\n if auto_arg_index is False:\n raise ValueError(\n \"cannot switch from manual field specification to \"\n \"automatic field numbering\"\n )\n field_name = str(auto_arg_index)\n auto_arg_index += 1\n elif field_name.isdigit():\n if auto_arg_index:\n raise ValueError(\n \"cannot switch from manual field specification to \"\n \"automatic field numbering\"\n )\n # disable auto arg incrementing, if it gets\n # used later on, then an exception will be raised\n auto_arg_index = False\n\n # given the field_name, find the object it references\n # and the argument it came from\n obj, arg_used = self.get_field(field_name, args, kwargs)\n used_args.add(arg_used)\n\n # do any conversion on the resulting object\n obj = self.convert_field(obj, conversion) if conversion else obj\n\n # format the object and append to the result\n formatted_text = (\n self.format_field(obj, format_spec) if format_spec else obj\n )\n results.append(formatted_text)\n\n return functools.reduce(self._add, results), auto_arg_index\n\n def format_field(self, value: Any, format_spec: str) -> Any:\n formatter: Optional[FormatSpecFunc] = self.format_specs.get(format_spec)\n if formatter is None and not issubclass(self.factory, str):\n segment_class: Type[\"MessageSegment\"] = self.factory.get_segment_class()\n method = getattr(segment_class, format_spec, None)\n if callable(method) and not cast(str, method.__name__).startswith(\"_\"):\n formatter = getattr(segment_class, format_spec)\n return (\n super().format_field(value, format_spec)\n if formatter is None\n else formatter(value)\n )\n\n def _add(self, a: Any, b: Any) -> Any:\n try:\n return a + b\n except TypeError:\n return a + str(b)\n", "path": "nonebot/internal/adapter/template.py"}]} | 3,058 | 897 |
gh_patches_debug_31980 | rasdani/github-patches | git_diff | enthought__chaco-506 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Zoom broken in some demos
**Problem Description**
This bug affects the following demos:
* `shell/contour.py`
* `shell/contourf.py`
* `basic/contour_plot.py`
Mouse-scroll doesn't zoom the plot until the user pans the plot. After panning, the plot is really zoomed out.
**Reproduction Steps:**
Mouse-scroll, then left-drag.
```python
python shell/contour.py
```
**Expected behavior:**
Plot should zoom immediately on mouse-scroll.
**OS, Python version:**
MacOSX 10.14
Python3.6
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `chaco/base_contour_plot.py`
Content:
```
1 import six
2
3 from numpy import array, isscalar, issubsctype, linspace, number
4
5 # Enthought library imports
6 from enable.api import ColorTrait
7 from traits.api import Bool, Instance, Int, List, Property, \
8 Range, Str, Trait, Tuple
9
10 # Local relative imports
11 from .base_2d_plot import Base2DPlot
12 from .color_mapper import ColorMapper
13
14
15 class BaseContourPlot(Base2DPlot):
16 """ The base class for contour plots. Mostly manages configuration and
17 change events with colormap and contour parameters.
18 """
19
20 #------------------------------------------------------------------------
21 # Data-related traits
22 #------------------------------------------------------------------------
23
24 # Defines the levels to contour.
25 # ``levels`` can be either: a list of floating point numbers that define
26 # the value of the function at the contours; a positive integer, in which
27 # case the range of the value is divided in the given number of equally
28 # spaced levels; or "auto" (default), which divides the range in 10 levels
29 levels = Trait("auto", Int, List)
30
31 # The color(s) of the lines.
32 # ``colors`` can be given as a color name, in which case all contours have
33 # the same color, as a list of colors, or as a colormap. If the list of
34 # colors is shorter than the number of levels, the values are repeated
35 # from the beginning of the list. Default is black.
36 # Colors are associated with levels of increasing value.
37 colors = Trait(None, Str, Instance(ColorMapper), List, Tuple)
38
39 # If present, the color mapper for the colorbar to look at.
40 color_mapper = Property(Instance(ColorMapper))
41
42 # A global alpha value to apply to all the contours
43 alpha = Trait(1.0, Range(0.0, 1.0))
44
45 #------------------------------------------------------------------------
46 # Private traits
47 #------------------------------------------------------------------------
48
49 # Is the cached level data valid?
50 _level_cache_valid = Bool(False)
51
52 # Is the cached color data valid?
53 _colors_cache_valid = Bool(False)
54
55 # List of levels and their associated line properties.
56 _levels = List
57
58 # List of colors
59 _colors = List
60
61 # Mapped trait used to convert user-suppied color values to AGG-acceptable
62 # ones. (Mapped traits in lists are not supported, must be converted one at
63 # a time.)
64 _color_map_trait = ColorTrait
65
66
67 def __init__(self, *args, **kwargs):
68 super(BaseContourPlot, self).__init__(*args, **kwargs)
69 if self.color_mapper:
70 self.color_mapper.on_trait_change(self._update_color_mapper, "updated")
71 return
72
73 def _update_levels(self):
74 """ Updates the levels cache. """
75 low, high = self.value.get_bounds()
76 if self.levels == "auto":
77 self._levels = list(linspace(low, high, 10))
78 elif isinstance(self.levels, int):
79 self._levels = list(linspace(low, high, self.levels))
80 else:
81 self._levels = self.levels
82 self._levels.sort()
83 self._level_cache_valid = True
84 self._colors_cache_valid = False
85
86 def _update_colors(self, numcolors=None):
87 """ Update the colors cache using our color mapper and based
88 on our number of levels. The **mode** parameter accounts for fenceposting:
89 - If **mode** is "poly", then the number of colors to generate is 1
90 less than the number of levels
91 - If **mode** is "line", then the number of colors to generate is
92 equal to the number of levels
93 """
94 if numcolors is None:
95 numcolors = len(self._levels)
96
97 colors = self.colors
98 # If we are given no colors, set a default for all levels
99 if colors is None:
100 self._color_map_trait = "black"
101 self._colors = [self._color_map_trait_] * numcolors
102
103 # If we are given a single color, apply it to all levels
104 elif isinstance(colors, six.string_types):
105 self._color_map_trait = colors
106 self._colors = [self._color_map_trait_] * numcolors
107
108 # If we are given a colormap, use it to map all the levels to colors
109 elif isinstance(colors, ColorMapper):
110 self._colors = []
111 mapped_colors = self.color_mapper.map_screen(array(self._levels))
112 for i in range(numcolors):
113 self._color_map_trait = tuple(mapped_colors[i])
114 self._colors.append(self._color_map_trait_)
115
116 # A list or tuple
117 # This could be a length 3 or 4 sequence of scalars, which indicates
118 # a color; otherwise, this is interpreted as a list of items to
119 # be converted via self._color_map_trait.
120 else:
121 if len(colors) in (3,4) and \
122 (isscalar(colors[0]) and issubsctype(type(colors[0]), number)):
123 self._color_map_trait = colors
124 self._colors = [self._color_map_trait_] * numcolors
125 else:
126 # if the list of colors is shorter than the list of levels, simply
127 # repeat colors from the beginning of the list as needed
128 self._colors = []
129 for i in range(len(self._levels)):
130 self._color_map_trait = colors[i%len(colors)]
131 self._colors.append(self._color_map_trait_)
132
133 self._colors_cache_valid = True
134 return
135
136
137 #------------------------------------------------------------------------
138 # Event handlers
139 #------------------------------------------------------------------------
140
141 def _index_data_changed_fired(self):
142 # If the index data has changed, the reset the levels cache (which
143 # also triggers all the other caches to reset).
144 self._level_cache_valid = False
145 self.invalidate_draw()
146
147 def _value_data_changed_fired(self):
148 # If the index data has changed, the reset the levels cache (which
149 # also triggers all the other caches to reset).
150 self._level_cache_valid = False
151 self.invalidate_draw()
152
153 def _index_mapper_changed_fired(self):
154 # If the index mapper has changed, then we need to redraw
155 self.invalidate_draw()
156
157 def _update_color_mapper(self):
158 # If the color mapper has changed, then we need to recompute the
159 # levels and cached data associated with that.
160 self._level_cache_valid = False
161 self.invalidate_draw()
162
163 def _levels_changed(self):
164 self._update_levels()
165 self.invalidate_draw()
166 self.request_redraw()
167
168 def _colors_changed(self):
169 if self._level_cache_valid:
170 self._update_colors()
171 self.invalidate_draw()
172
173 #------------------------------------------------------------------------
174 # Trait properties
175 #------------------------------------------------------------------------
176
177 def _get_color_mapper(self):
178 if isinstance(self.colors, ColorMapper):
179 return self.colors
180 else:
181 return None
182
183 def _set_color_mapper(self, color_mapper):
184 # Remove the dynamic event handler from the old color mapper
185 if self.colors is not None and isinstance(self.colors, ColorMapper):
186 self.colors.on_trait_change(self._update_color_mapper, "updated", remove=True)
187
188 # Check to see if we should copy over the range as well
189 if color_mapper is not None:
190 if color_mapper.range is None and self.colors.range is not None:
191 color_mapper.range = self.colors.range
192
193 # Attach the dynamic event handler to the new color mapper
194 if color_mapper is not None:
195 color_mapper.on_trait_change(self._update_color_mapper, "updated")
196
197 self.colors = color_mapper
198 self._update_color_mapper()
199
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/chaco/base_contour_plot.py b/chaco/base_contour_plot.py
--- a/chaco/base_contour_plot.py
+++ b/chaco/base_contour_plot.py
@@ -142,33 +142,32 @@
# If the index data has changed, the reset the levels cache (which
# also triggers all the other caches to reset).
self._level_cache_valid = False
- self.invalidate_draw()
+ self.invalidate_and_redraw()
def _value_data_changed_fired(self):
# If the index data has changed, the reset the levels cache (which
# also triggers all the other caches to reset).
self._level_cache_valid = False
- self.invalidate_draw()
+ self.invalidate_and_redraw()
def _index_mapper_changed_fired(self):
# If the index mapper has changed, then we need to redraw
- self.invalidate_draw()
+ self.invalidate_and_redraw()
def _update_color_mapper(self):
# If the color mapper has changed, then we need to recompute the
# levels and cached data associated with that.
self._level_cache_valid = False
- self.invalidate_draw()
+ self.invalidate_and_redraw()
def _levels_changed(self):
self._update_levels()
- self.invalidate_draw()
- self.request_redraw()
+ self.invalidate_and_redraw()
def _colors_changed(self):
if self._level_cache_valid:
self._update_colors()
- self.invalidate_draw()
+ self.invalidate_and_redraw()
#------------------------------------------------------------------------
# Trait properties
| {"golden_diff": "diff --git a/chaco/base_contour_plot.py b/chaco/base_contour_plot.py\n--- a/chaco/base_contour_plot.py\n+++ b/chaco/base_contour_plot.py\n@@ -142,33 +142,32 @@\n # If the index data has changed, the reset the levels cache (which\n # also triggers all the other caches to reset).\n self._level_cache_valid = False\n- self.invalidate_draw()\n+ self.invalidate_and_redraw()\n \n def _value_data_changed_fired(self):\n # If the index data has changed, the reset the levels cache (which\n # also triggers all the other caches to reset).\n self._level_cache_valid = False\n- self.invalidate_draw()\n+ self.invalidate_and_redraw()\n \n def _index_mapper_changed_fired(self):\n # If the index mapper has changed, then we need to redraw\n- self.invalidate_draw()\n+ self.invalidate_and_redraw()\n \n def _update_color_mapper(self):\n # If the color mapper has changed, then we need to recompute the\n # levels and cached data associated with that.\n self._level_cache_valid = False\n- self.invalidate_draw()\n+ self.invalidate_and_redraw()\n \n def _levels_changed(self):\n self._update_levels()\n- self.invalidate_draw()\n- self.request_redraw()\n+ self.invalidate_and_redraw()\n \n def _colors_changed(self):\n if self._level_cache_valid:\n self._update_colors()\n- self.invalidate_draw()\n+ self.invalidate_and_redraw()\n \n #------------------------------------------------------------------------\n # Trait properties\n", "issue": "Zoom broken in some demos\n**Problem Description**\r\nThis bug affects the following demos:\r\n* `shell/contour.py`\r\n* `shell/contourf.py`\r\n* `basic/contour_plot.py`\r\n\r\nMouse-scroll doesn't zoom the plot until the user pans the plot. After panning, the plot is really zoomed out.\r\n\r\n**Reproduction Steps:**\r\nMouse-scroll, then left-drag.\r\n\r\n```python\r\npython shell/contour.py\r\n```\r\n\r\n**Expected behavior:**\r\nPlot should zoom immediately on mouse-scroll.\r\n\r\n**OS, Python version:**\r\nMacOSX 10.14\r\nPython3.6\n", "before_files": [{"content": "import six\n\nfrom numpy import array, isscalar, issubsctype, linspace, number\n\n# Enthought library imports\nfrom enable.api import ColorTrait\nfrom traits.api import Bool, Instance, Int, List, Property, \\\n Range, Str, Trait, Tuple\n\n# Local relative imports\nfrom .base_2d_plot import Base2DPlot\nfrom .color_mapper import ColorMapper\n\n\nclass BaseContourPlot(Base2DPlot):\n \"\"\" The base class for contour plots. Mostly manages configuration and\n change events with colormap and contour parameters.\n \"\"\"\n\n #------------------------------------------------------------------------\n # Data-related traits\n #------------------------------------------------------------------------\n\n # Defines the levels to contour.\n # ``levels`` can be either: a list of floating point numbers that define\n # the value of the function at the contours; a positive integer, in which\n # case the range of the value is divided in the given number of equally\n # spaced levels; or \"auto\" (default), which divides the range in 10 levels\n levels = Trait(\"auto\", Int, List)\n\n # The color(s) of the lines.\n # ``colors`` can be given as a color name, in which case all contours have\n # the same color, as a list of colors, or as a colormap. If the list of\n # colors is shorter than the number of levels, the values are repeated\n # from the beginning of the list. 
Default is black.\n # Colors are associated with levels of increasing value.\n colors = Trait(None, Str, Instance(ColorMapper), List, Tuple)\n\n # If present, the color mapper for the colorbar to look at.\n color_mapper = Property(Instance(ColorMapper))\n\n # A global alpha value to apply to all the contours\n alpha = Trait(1.0, Range(0.0, 1.0))\n\n #------------------------------------------------------------------------\n # Private traits\n #------------------------------------------------------------------------\n\n # Is the cached level data valid?\n _level_cache_valid = Bool(False)\n\n # Is the cached color data valid?\n _colors_cache_valid = Bool(False)\n\n # List of levels and their associated line properties.\n _levels = List\n\n # List of colors\n _colors = List\n\n # Mapped trait used to convert user-suppied color values to AGG-acceptable\n # ones. (Mapped traits in lists are not supported, must be converted one at\n # a time.)\n _color_map_trait = ColorTrait\n\n\n def __init__(self, *args, **kwargs):\n super(BaseContourPlot, self).__init__(*args, **kwargs)\n if self.color_mapper:\n self.color_mapper.on_trait_change(self._update_color_mapper, \"updated\")\n return\n\n def _update_levels(self):\n \"\"\" Updates the levels cache. \"\"\"\n low, high = self.value.get_bounds()\n if self.levels == \"auto\":\n self._levels = list(linspace(low, high, 10))\n elif isinstance(self.levels, int):\n self._levels = list(linspace(low, high, self.levels))\n else:\n self._levels = self.levels\n self._levels.sort()\n self._level_cache_valid = True\n self._colors_cache_valid = False\n\n def _update_colors(self, numcolors=None):\n \"\"\" Update the colors cache using our color mapper and based\n on our number of levels. The **mode** parameter accounts for fenceposting:\n - If **mode** is \"poly\", then the number of colors to generate is 1\n less than the number of levels\n - If **mode** is \"line\", then the number of colors to generate is\n equal to the number of levels\n \"\"\"\n if numcolors is None:\n numcolors = len(self._levels)\n\n colors = self.colors\n # If we are given no colors, set a default for all levels\n if colors is None:\n self._color_map_trait = \"black\"\n self._colors = [self._color_map_trait_] * numcolors\n\n # If we are given a single color, apply it to all levels\n elif isinstance(colors, six.string_types):\n self._color_map_trait = colors\n self._colors = [self._color_map_trait_] * numcolors\n\n # If we are given a colormap, use it to map all the levels to colors\n elif isinstance(colors, ColorMapper):\n self._colors = []\n mapped_colors = self.color_mapper.map_screen(array(self._levels))\n for i in range(numcolors):\n self._color_map_trait = tuple(mapped_colors[i])\n self._colors.append(self._color_map_trait_)\n\n # A list or tuple\n # This could be a length 3 or 4 sequence of scalars, which indicates\n # a color; otherwise, this is interpreted as a list of items to\n # be converted via self._color_map_trait.\n else:\n if len(colors) in (3,4) and \\\n (isscalar(colors[0]) and issubsctype(type(colors[0]), number)):\n self._color_map_trait = colors\n self._colors = [self._color_map_trait_] * numcolors\n else:\n # if the list of colors is shorter than the list of levels, simply\n # repeat colors from the beginning of the list as needed\n self._colors = []\n for i in range(len(self._levels)):\n self._color_map_trait = colors[i%len(colors)]\n self._colors.append(self._color_map_trait_)\n\n self._colors_cache_valid = True\n return\n\n\n 
#------------------------------------------------------------------------\n # Event handlers\n #------------------------------------------------------------------------\n\n def _index_data_changed_fired(self):\n # If the index data has changed, the reset the levels cache (which\n # also triggers all the other caches to reset).\n self._level_cache_valid = False\n self.invalidate_draw()\n\n def _value_data_changed_fired(self):\n # If the index data has changed, the reset the levels cache (which\n # also triggers all the other caches to reset).\n self._level_cache_valid = False\n self.invalidate_draw()\n\n def _index_mapper_changed_fired(self):\n # If the index mapper has changed, then we need to redraw\n self.invalidate_draw()\n\n def _update_color_mapper(self):\n # If the color mapper has changed, then we need to recompute the\n # levels and cached data associated with that.\n self._level_cache_valid = False\n self.invalidate_draw()\n\n def _levels_changed(self):\n self._update_levels()\n self.invalidate_draw()\n self.request_redraw()\n\n def _colors_changed(self):\n if self._level_cache_valid:\n self._update_colors()\n self.invalidate_draw()\n\n #------------------------------------------------------------------------\n # Trait properties\n #------------------------------------------------------------------------\n\n def _get_color_mapper(self):\n if isinstance(self.colors, ColorMapper):\n return self.colors\n else:\n return None\n\n def _set_color_mapper(self, color_mapper):\n # Remove the dynamic event handler from the old color mapper\n if self.colors is not None and isinstance(self.colors, ColorMapper):\n self.colors.on_trait_change(self._update_color_mapper, \"updated\", remove=True)\n\n # Check to see if we should copy over the range as well\n if color_mapper is not None:\n if color_mapper.range is None and self.colors.range is not None:\n color_mapper.range = self.colors.range\n\n # Attach the dynamic event handler to the new color mapper\n if color_mapper is not None:\n color_mapper.on_trait_change(self._update_color_mapper, \"updated\")\n\n self.colors = color_mapper\n self._update_color_mapper()\n", "path": "chaco/base_contour_plot.py"}], "after_files": [{"content": "import six\n\nfrom numpy import array, isscalar, issubsctype, linspace, number\n\n# Enthought library imports\nfrom enable.api import ColorTrait\nfrom traits.api import Bool, Instance, Int, List, Property, \\\n Range, Str, Trait, Tuple\n\n# Local relative imports\nfrom .base_2d_plot import Base2DPlot\nfrom .color_mapper import ColorMapper\n\n\nclass BaseContourPlot(Base2DPlot):\n \"\"\" The base class for contour plots. Mostly manages configuration and\n change events with colormap and contour parameters.\n \"\"\"\n\n #------------------------------------------------------------------------\n # Data-related traits\n #------------------------------------------------------------------------\n\n # Defines the levels to contour.\n # ``levels`` can be either: a list of floating point numbers that define\n # the value of the function at the contours; a positive integer, in which\n # case the range of the value is divided in the given number of equally\n # spaced levels; or \"auto\" (default), which divides the range in 10 levels\n levels = Trait(\"auto\", Int, List)\n\n # The color(s) of the lines.\n # ``colors`` can be given as a color name, in which case all contours have\n # the same color, as a list of colors, or as a colormap. 
If the list of\n # colors is shorter than the number of levels, the values are repeated\n # from the beginning of the list. Default is black.\n # Colors are associated with levels of increasing value.\n colors = Trait(None, Str, Instance(ColorMapper), List, Tuple)\n\n # If present, the color mapper for the colorbar to look at.\n color_mapper = Property(Instance(ColorMapper))\n\n # A global alpha value to apply to all the contours\n alpha = Trait(1.0, Range(0.0, 1.0))\n\n #------------------------------------------------------------------------\n # Private traits\n #------------------------------------------------------------------------\n\n # Is the cached level data valid?\n _level_cache_valid = Bool(False)\n\n # Is the cached color data valid?\n _colors_cache_valid = Bool(False)\n\n # List of levels and their associated line properties.\n _levels = List\n\n # List of colors\n _colors = List\n\n # Mapped trait used to convert user-suppied color values to AGG-acceptable\n # ones. (Mapped traits in lists are not supported, must be converted one at\n # a time.)\n _color_map_trait = ColorTrait\n\n\n def __init__(self, *args, **kwargs):\n super(BaseContourPlot, self).__init__(*args, **kwargs)\n if self.color_mapper:\n self.color_mapper.on_trait_change(self._update_color_mapper, \"updated\")\n return\n\n def _update_levels(self):\n \"\"\" Updates the levels cache. \"\"\"\n low, high = self.value.get_bounds()\n if self.levels == \"auto\":\n self._levels = list(linspace(low, high, 10))\n elif isinstance(self.levels, int):\n self._levels = list(linspace(low, high, self.levels))\n else:\n self._levels = self.levels\n self._levels.sort()\n self._level_cache_valid = True\n self._colors_cache_valid = False\n\n def _update_colors(self, numcolors=None):\n \"\"\" Update the colors cache using our color mapper and based\n on our number of levels. 
The **mode** parameter accounts for fenceposting:\n - If **mode** is \"poly\", then the number of colors to generate is 1\n less than the number of levels\n - If **mode** is \"line\", then the number of colors to generate is\n equal to the number of levels\n \"\"\"\n if numcolors is None:\n numcolors = len(self._levels)\n\n colors = self.colors\n # If we are given no colors, set a default for all levels\n if colors is None:\n self._color_map_trait = \"black\"\n self._colors = [self._color_map_trait_] * numcolors\n\n # If we are given a single color, apply it to all levels\n elif isinstance(colors, six.string_types):\n self._color_map_trait = colors\n self._colors = [self._color_map_trait_] * numcolors\n\n # If we are given a colormap, use it to map all the levels to colors\n elif isinstance(colors, ColorMapper):\n self._colors = []\n mapped_colors = self.color_mapper.map_screen(array(self._levels))\n for i in range(numcolors):\n self._color_map_trait = tuple(mapped_colors[i])\n self._colors.append(self._color_map_trait_)\n\n # A list or tuple\n # This could be a length 3 or 4 sequence of scalars, which indicates\n # a color; otherwise, this is interpreted as a list of items to\n # be converted via self._color_map_trait.\n else:\n if len(colors) in (3,4) and \\\n (isscalar(colors[0]) and issubsctype(type(colors[0]), number)):\n self._color_map_trait = colors\n self._colors = [self._color_map_trait_] * numcolors\n else:\n # if the list of colors is shorter than the list of levels, simply\n # repeat colors from the beginning of the list as needed\n self._colors = []\n for i in range(len(self._levels)):\n self._color_map_trait = colors[i%len(colors)]\n self._colors.append(self._color_map_trait_)\n\n self._colors_cache_valid = True\n return\n\n\n #------------------------------------------------------------------------\n # Event handlers\n #------------------------------------------------------------------------\n\n def _index_data_changed_fired(self):\n # If the index data has changed, the reset the levels cache (which\n # also triggers all the other caches to reset).\n self._level_cache_valid = False\n self.invalidate_and_redraw()\n\n def _value_data_changed_fired(self):\n # If the index data has changed, the reset the levels cache (which\n # also triggers all the other caches to reset).\n self._level_cache_valid = False\n self.invalidate_and_redraw()\n\n def _index_mapper_changed_fired(self):\n # If the index mapper has changed, then we need to redraw\n self.invalidate_and_redraw()\n\n def _update_color_mapper(self):\n # If the color mapper has changed, then we need to recompute the\n # levels and cached data associated with that.\n self._level_cache_valid = False\n self.invalidate_and_redraw()\n\n def _levels_changed(self):\n self._update_levels()\n self.invalidate_and_redraw()\n\n def _colors_changed(self):\n if self._level_cache_valid:\n self._update_colors()\n self.invalidate_and_redraw()\n\n #------------------------------------------------------------------------\n # Trait properties\n #------------------------------------------------------------------------\n\n def _get_color_mapper(self):\n if isinstance(self.colors, ColorMapper):\n return self.colors\n else:\n return None\n\n def _set_color_mapper(self, color_mapper):\n # Remove the dynamic event handler from the old color mapper\n if self.colors is not None and isinstance(self.colors, ColorMapper):\n self.colors.on_trait_change(self._update_color_mapper, \"updated\", remove=True)\n\n # Check to see if we should copy over the range 
as well\n if color_mapper is not None:\n if color_mapper.range is None and self.colors.range is not None:\n color_mapper.range = self.colors.range\n\n # Attach the dynamic event handler to the new color mapper\n if color_mapper is not None:\n color_mapper.on_trait_change(self._update_color_mapper, \"updated\")\n\n self.colors = color_mapper\n self._update_color_mapper()\n", "path": "chaco/base_contour_plot.py"}]} | 2,547 | 349 |
gh_patches_debug_21142 | rasdani/github-patches | git_diff | beetbox__beets-3671 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
parentwork: In tests, mock all MusicBrainz responses
I didn't notice this when we originally merged the parentwork plugin in #3279, but its tests rely on real communication with the MusicBrainz web service, i.e., they fail if there is no network connectivity. [This Travis job](https://travis-ci.org/beetbox/beets/jobs/558936634) is an example of a spurious failure caused by network interruptions.
We need to isolate these tests by mocking the MB queries so that no network traffic is ever actually sent.
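For illustration only, a minimal sketch of that kind of isolation — patching the `musicbrainzngs.get_work_by_id` call that `beetsplug/parentwork.py` makes, so a test receives a canned response instead of hitting the network. The fixture shape and test name below are assumptions (only the keys the plugin reads are taken from the code above); beets' real test helpers are not used here:

```python
from unittest import mock

from beetsplug import parentwork

# Hypothetical canned MusicBrainz response: only the keys parentwork.py
# actually reads ('work' -> 'id', 'title', 'artist-relation-list').
FAKE_WORK = {
    'work': {
        'id': 'hypothetical-work-id',
        'title': 'Example Parent Work',
        'artist-relation-list': [
            {'type': 'composer',
             'artist': {'name': 'Some Composer',
                        'sort-name': 'Composer, Some'}},
        ],
    }
}


def test_find_parentwork_info_offline():
    # Replace the web-service call on the module object the plugin uses,
    # so no network traffic is generated.
    with mock.patch.object(parentwork.musicbrainzngs, 'get_work_by_id',
                           return_value=FAKE_WORK) as fake_call:
        info, date = parentwork.find_parentwork_info('hypothetical-work-id')

    assert info is FAKE_WORK
    assert date is None  # the fake composer relation has no 'end' key
    fake_call.assert_called()
```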
@dosoe, can you please look into this?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `beetsplug/parentwork.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # This file is part of beets.
3 # Copyright 2017, Dorian Soergel.
4 #
5 # Permission is hereby granted, free of charge, to any person obtaining
6 # a copy of this software and associated documentation files (the
7 # "Software"), to deal in the Software without restriction, including
8 # without limitation the rights to use, copy, modify, merge, publish,
9 # distribute, sublicense, and/or sell copies of the Software, and to
10 # permit persons to whom the Software is furnished to do so, subject to
11 # the following conditions:
12 #
13 # The above copyright notice and this permission notice shall be
14 # included in all copies or substantial portions of the Software.
15
16 """Gets parent work, its disambiguation and id, composer, composer sort name
17 and work composition date
18 """
19
20 from __future__ import division, absolute_import, print_function
21
22 from beets import ui
23 from beets.plugins import BeetsPlugin
24
25 import musicbrainzngs
26
27
28 def direct_parent_id(mb_workid, work_date=None):
29 """Given a Musicbrainz work id, find the id one of the works the work is
30 part of and the first composition date it encounters.
31 """
32 work_info = musicbrainzngs.get_work_by_id(mb_workid,
33 includes=["work-rels",
34 "artist-rels"])
35 if 'artist-relation-list' in work_info['work'] and work_date is None:
36 for artist in work_info['work']['artist-relation-list']:
37 if artist['type'] == 'composer':
38 if 'end' in artist.keys():
39 work_date = artist['end']
40
41 if 'work-relation-list' in work_info['work']:
42 for direct_parent in work_info['work']['work-relation-list']:
43 if direct_parent['type'] == 'parts' \
44 and direct_parent.get('direction') == 'backward':
45 direct_id = direct_parent['work']['id']
46 return direct_id, work_date
47 return None, work_date
48
49
50 def work_parent_id(mb_workid):
51 """Find the parent work id and composition date of a work given its id.
52 """
53 work_date = None
54 while True:
55 new_mb_workid, work_date = direct_parent_id(mb_workid, work_date)
56 if not new_mb_workid:
57 return mb_workid, work_date
58 mb_workid = new_mb_workid
59 return mb_workid, work_date
60
61
62 def find_parentwork_info(mb_workid):
63 """Get the MusicBrainz information dict about a parent work, including
64 the artist relations, and the composition date for a work's parent work.
65 """
66 parent_id, work_date = work_parent_id(mb_workid)
67 work_info = musicbrainzngs.get_work_by_id(parent_id,
68 includes=["artist-rels"])
69 return work_info, work_date
70
71
72 class ParentWorkPlugin(BeetsPlugin):
73 def __init__(self):
74 super(ParentWorkPlugin, self).__init__()
75
76 self.config.add({
77 'auto': False,
78 'force': False,
79 })
80
81 if self.config['auto']:
82 self.import_stages = [self.imported]
83
84 def commands(self):
85
86 def func(lib, opts, args):
87 self.config.set_args(opts)
88 force_parent = self.config['force'].get(bool)
89 write = ui.should_write()
90
91 for item in lib.items(ui.decargs(args)):
92 changed = self.find_work(item, force_parent)
93 if changed:
94 item.store()
95 if write:
96 item.try_write()
97 command = ui.Subcommand(
98 'parentwork',
99 help=u'fetche parent works, composers and dates')
100
101 command.parser.add_option(
102 u'-f', u'--force', dest='force',
103 action='store_true', default=None,
104 help=u're-fetch when parent work is already present')
105
106 command.func = func
107 return [command]
108
109 def imported(self, session, task):
110 """Import hook for fetching parent works automatically.
111 """
112 force_parent = self.config['force'].get(bool)
113
114 for item in task.imported_items():
115 self.find_work(item, force_parent)
116 item.store()
117
118 def get_info(self, item, work_info):
119 """Given the parent work info dict, fetch parent_composer,
120 parent_composer_sort, parentwork, parentwork_disambig, mb_workid and
121 composer_ids.
122 """
123
124 parent_composer = []
125 parent_composer_sort = []
126 parentwork_info = {}
127
128 composer_exists = False
129 if 'artist-relation-list' in work_info['work']:
130 for artist in work_info['work']['artist-relation-list']:
131 if artist['type'] == 'composer':
132 parent_composer.append(artist['artist']['name'])
133 parent_composer_sort.append(artist['artist']['sort-name'])
134 if 'end' in artist.keys():
135 parentwork_info["parentwork_date"] = artist['end']
136
137 parentwork_info['parent_composer'] = u', '.join(parent_composer)
138 parentwork_info['parent_composer_sort'] = u', '.join(
139 parent_composer_sort)
140
141 if not composer_exists:
142 self._log.debug(
143 'no composer for {}; add one at '
144 'https://musicbrainz.org/work/{}',
145 item, work_info['work']['id'],
146 )
147
148 parentwork_info['parentwork'] = work_info['work']['title']
149 parentwork_info['mb_parentworkid'] = work_info['work']['id']
150
151 if 'disambiguation' in work_info['work']:
152 parentwork_info['parentwork_disambig'] = work_info[
153 'work']['disambiguation']
154
155 else:
156 parentwork_info['parentwork_disambig'] = None
157
158 return parentwork_info
159
160 def find_work(self, item, force):
161 """Finds the parent work of a recording and populates the tags
162 accordingly.
163
164 The parent work is found recursively, by finding the direct parent
165 repeatedly until there are no more links in the chain. We return the
166 final, topmost work in the chain.
167
168 Namely, the tags parentwork, parentwork_disambig, mb_parentworkid,
169 parent_composer, parent_composer_sort and work_date are populated.
170 """
171
172 if not item.mb_workid:
173 self._log.info('No work for {}, \
174 add one at https://musicbrainz.org/recording/{}', item, item.mb_trackid)
175 return
176
177 hasparent = hasattr(item, 'parentwork')
178 work_changed = True
179 if hasattr(item, 'parentwork_workid_current'):
180 work_changed = item.parentwork_workid_current != item.mb_workid
181 if force or not hasparent or work_changed:
182 try:
183 work_info, work_date = find_parentwork_info(item.mb_workid)
184 except musicbrainzngs.musicbrainz.WebServiceError as e:
185 self._log.debug("error fetching work: {}", e)
186 return
187 parent_info = self.get_info(item, work_info)
188 parent_info['parentwork_workid_current'] = item.mb_workid
189 if 'parent_composer' in parent_info:
190 self._log.debug("Work fetched: {} - {}",
191 parent_info['parentwork'],
192 parent_info['parent_composer'])
193 else:
194 self._log.debug("Work fetched: {} - no parent composer",
195 parent_info['parentwork'])
196
197 elif hasparent:
198 self._log.debug("{}: Work present, skipping", item)
199 return
200
201 # apply all non-null values to the item
202 for key, value in parent_info.items():
203 if value:
204 item[key] = value
205
206 if work_date:
207 item['work_date'] = work_date
208 return ui.show_model_changes(
209 item, fields=['parentwork', 'parentwork_disambig',
210 'mb_parentworkid', 'parent_composer',
211 'parent_composer_sort', 'work_date',
212 'parentwork_workid_current', 'parentwork_date'])
213
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/beetsplug/parentwork.py b/beetsplug/parentwork.py
--- a/beetsplug/parentwork.py
+++ b/beetsplug/parentwork.py
@@ -96,7 +96,7 @@
item.try_write()
command = ui.Subcommand(
'parentwork',
- help=u'fetche parent works, composers and dates')
+ help=u'fetch parent works, composers and dates')
command.parser.add_option(
u'-f', u'--force', dest='force',
@@ -129,6 +129,7 @@
if 'artist-relation-list' in work_info['work']:
for artist in work_info['work']['artist-relation-list']:
if artist['type'] == 'composer':
+ composer_exists = True
parent_composer.append(artist['artist']['name'])
parent_composer_sort.append(artist['artist']['sort-name'])
if 'end' in artist.keys():
| {"golden_diff": "diff --git a/beetsplug/parentwork.py b/beetsplug/parentwork.py\n--- a/beetsplug/parentwork.py\n+++ b/beetsplug/parentwork.py\n@@ -96,7 +96,7 @@\n item.try_write()\n command = ui.Subcommand(\n 'parentwork',\n- help=u'fetche parent works, composers and dates')\n+ help=u'fetch parent works, composers and dates')\n \n command.parser.add_option(\n u'-f', u'--force', dest='force',\n@@ -129,6 +129,7 @@\n if 'artist-relation-list' in work_info['work']:\n for artist in work_info['work']['artist-relation-list']:\n if artist['type'] == 'composer':\n+ composer_exists = True\n parent_composer.append(artist['artist']['name'])\n parent_composer_sort.append(artist['artist']['sort-name'])\n if 'end' in artist.keys():\n", "issue": "parentwork: In tests, mock all MusicBrainz responses\nI didn't notice this when we originally merged the parentwork plugin in #3279, but its tests rely on real communication with the MusicBrainz web service, i.e., they fail if there is no network connectivity. [This Travis job](https://travis-ci.org/beetbox/beets/jobs/558936634) is an example of a spurious failure caused by network interruptions.\r\n\r\nWe need to isolate these tests by mocking the MB queries so that no network traffic is every actually sent.\r\n\r\n@dosoe, can you please look into this?\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2017, Dorian Soergel.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Gets parent work, its disambiguation and id, composer, composer sort name\nand work composition date\n\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\nfrom beets import ui\nfrom beets.plugins import BeetsPlugin\n\nimport musicbrainzngs\n\n\ndef direct_parent_id(mb_workid, work_date=None):\n \"\"\"Given a Musicbrainz work id, find the id one of the works the work is\n part of and the first composition date it encounters.\n \"\"\"\n work_info = musicbrainzngs.get_work_by_id(mb_workid,\n includes=[\"work-rels\",\n \"artist-rels\"])\n if 'artist-relation-list' in work_info['work'] and work_date is None:\n for artist in work_info['work']['artist-relation-list']:\n if artist['type'] == 'composer':\n if 'end' in artist.keys():\n work_date = artist['end']\n\n if 'work-relation-list' in work_info['work']:\n for direct_parent in work_info['work']['work-relation-list']:\n if direct_parent['type'] == 'parts' \\\n and direct_parent.get('direction') == 'backward':\n direct_id = direct_parent['work']['id']\n return direct_id, work_date\n return None, work_date\n\n\ndef work_parent_id(mb_workid):\n \"\"\"Find the parent work id and composition date of a work given its id.\n \"\"\"\n work_date = None\n while True:\n new_mb_workid, work_date = direct_parent_id(mb_workid, work_date)\n if not new_mb_workid:\n return mb_workid, work_date\n mb_workid = new_mb_workid\n return mb_workid, work_date\n\n\ndef find_parentwork_info(mb_workid):\n \"\"\"Get the MusicBrainz information dict about a 
parent work, including\n the artist relations, and the composition date for a work's parent work.\n \"\"\"\n parent_id, work_date = work_parent_id(mb_workid)\n work_info = musicbrainzngs.get_work_by_id(parent_id,\n includes=[\"artist-rels\"])\n return work_info, work_date\n\n\nclass ParentWorkPlugin(BeetsPlugin):\n def __init__(self):\n super(ParentWorkPlugin, self).__init__()\n\n self.config.add({\n 'auto': False,\n 'force': False,\n })\n\n if self.config['auto']:\n self.import_stages = [self.imported]\n\n def commands(self):\n\n def func(lib, opts, args):\n self.config.set_args(opts)\n force_parent = self.config['force'].get(bool)\n write = ui.should_write()\n\n for item in lib.items(ui.decargs(args)):\n changed = self.find_work(item, force_parent)\n if changed:\n item.store()\n if write:\n item.try_write()\n command = ui.Subcommand(\n 'parentwork',\n help=u'fetche parent works, composers and dates')\n\n command.parser.add_option(\n u'-f', u'--force', dest='force',\n action='store_true', default=None,\n help=u're-fetch when parent work is already present')\n\n command.func = func\n return [command]\n\n def imported(self, session, task):\n \"\"\"Import hook for fetching parent works automatically.\n \"\"\"\n force_parent = self.config['force'].get(bool)\n\n for item in task.imported_items():\n self.find_work(item, force_parent)\n item.store()\n\n def get_info(self, item, work_info):\n \"\"\"Given the parent work info dict, fetch parent_composer,\n parent_composer_sort, parentwork, parentwork_disambig, mb_workid and\n composer_ids.\n \"\"\"\n\n parent_composer = []\n parent_composer_sort = []\n parentwork_info = {}\n\n composer_exists = False\n if 'artist-relation-list' in work_info['work']:\n for artist in work_info['work']['artist-relation-list']:\n if artist['type'] == 'composer':\n parent_composer.append(artist['artist']['name'])\n parent_composer_sort.append(artist['artist']['sort-name'])\n if 'end' in artist.keys():\n parentwork_info[\"parentwork_date\"] = artist['end']\n\n parentwork_info['parent_composer'] = u', '.join(parent_composer)\n parentwork_info['parent_composer_sort'] = u', '.join(\n parent_composer_sort)\n\n if not composer_exists:\n self._log.debug(\n 'no composer for {}; add one at '\n 'https://musicbrainz.org/work/{}',\n item, work_info['work']['id'],\n )\n\n parentwork_info['parentwork'] = work_info['work']['title']\n parentwork_info['mb_parentworkid'] = work_info['work']['id']\n\n if 'disambiguation' in work_info['work']:\n parentwork_info['parentwork_disambig'] = work_info[\n 'work']['disambiguation']\n\n else:\n parentwork_info['parentwork_disambig'] = None\n\n return parentwork_info\n\n def find_work(self, item, force):\n \"\"\"Finds the parent work of a recording and populates the tags\n accordingly.\n\n The parent work is found recursively, by finding the direct parent\n repeatedly until there are no more links in the chain. 
We return the\n final, topmost work in the chain.\n\n Namely, the tags parentwork, parentwork_disambig, mb_parentworkid,\n parent_composer, parent_composer_sort and work_date are populated.\n \"\"\"\n\n if not item.mb_workid:\n self._log.info('No work for {}, \\\nadd one at https://musicbrainz.org/recording/{}', item, item.mb_trackid)\n return\n\n hasparent = hasattr(item, 'parentwork')\n work_changed = True\n if hasattr(item, 'parentwork_workid_current'):\n work_changed = item.parentwork_workid_current != item.mb_workid\n if force or not hasparent or work_changed:\n try:\n work_info, work_date = find_parentwork_info(item.mb_workid)\n except musicbrainzngs.musicbrainz.WebServiceError as e:\n self._log.debug(\"error fetching work: {}\", e)\n return\n parent_info = self.get_info(item, work_info)\n parent_info['parentwork_workid_current'] = item.mb_workid\n if 'parent_composer' in parent_info:\n self._log.debug(\"Work fetched: {} - {}\",\n parent_info['parentwork'],\n parent_info['parent_composer'])\n else:\n self._log.debug(\"Work fetched: {} - no parent composer\",\n parent_info['parentwork'])\n\n elif hasparent:\n self._log.debug(\"{}: Work present, skipping\", item)\n return\n\n # apply all non-null values to the item\n for key, value in parent_info.items():\n if value:\n item[key] = value\n\n if work_date:\n item['work_date'] = work_date\n return ui.show_model_changes(\n item, fields=['parentwork', 'parentwork_disambig',\n 'mb_parentworkid', 'parent_composer',\n 'parent_composer_sort', 'work_date',\n 'parentwork_workid_current', 'parentwork_date'])\n", "path": "beetsplug/parentwork.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# This file is part of beets.\n# Copyright 2017, Dorian Soergel.\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n\n\"\"\"Gets parent work, its disambiguation and id, composer, composer sort name\nand work composition date\n\"\"\"\n\nfrom __future__ import division, absolute_import, print_function\n\nfrom beets import ui\nfrom beets.plugins import BeetsPlugin\n\nimport musicbrainzngs\n\n\ndef direct_parent_id(mb_workid, work_date=None):\n \"\"\"Given a Musicbrainz work id, find the id one of the works the work is\n part of and the first composition date it encounters.\n \"\"\"\n work_info = musicbrainzngs.get_work_by_id(mb_workid,\n includes=[\"work-rels\",\n \"artist-rels\"])\n if 'artist-relation-list' in work_info['work'] and work_date is None:\n for artist in work_info['work']['artist-relation-list']:\n if artist['type'] == 'composer':\n if 'end' in artist.keys():\n work_date = artist['end']\n\n if 'work-relation-list' in work_info['work']:\n for direct_parent in work_info['work']['work-relation-list']:\n if direct_parent['type'] == 'parts' \\\n and direct_parent.get('direction') == 'backward':\n direct_id = direct_parent['work']['id']\n return direct_id, work_date\n return None, work_date\n\n\ndef work_parent_id(mb_workid):\n \"\"\"Find the parent work id and composition date of a work given its id.\n 
\"\"\"\n work_date = None\n while True:\n new_mb_workid, work_date = direct_parent_id(mb_workid, work_date)\n if not new_mb_workid:\n return mb_workid, work_date\n mb_workid = new_mb_workid\n return mb_workid, work_date\n\n\ndef find_parentwork_info(mb_workid):\n \"\"\"Get the MusicBrainz information dict about a parent work, including\n the artist relations, and the composition date for a work's parent work.\n \"\"\"\n parent_id, work_date = work_parent_id(mb_workid)\n work_info = musicbrainzngs.get_work_by_id(parent_id,\n includes=[\"artist-rels\"])\n return work_info, work_date\n\n\nclass ParentWorkPlugin(BeetsPlugin):\n def __init__(self):\n super(ParentWorkPlugin, self).__init__()\n\n self.config.add({\n 'auto': False,\n 'force': False,\n })\n\n if self.config['auto']:\n self.import_stages = [self.imported]\n\n def commands(self):\n\n def func(lib, opts, args):\n self.config.set_args(opts)\n force_parent = self.config['force'].get(bool)\n write = ui.should_write()\n\n for item in lib.items(ui.decargs(args)):\n changed = self.find_work(item, force_parent)\n if changed:\n item.store()\n if write:\n item.try_write()\n command = ui.Subcommand(\n 'parentwork',\n help=u'fetch parent works, composers and dates')\n\n command.parser.add_option(\n u'-f', u'--force', dest='force',\n action='store_true', default=None,\n help=u're-fetch when parent work is already present')\n\n command.func = func\n return [command]\n\n def imported(self, session, task):\n \"\"\"Import hook for fetching parent works automatically.\n \"\"\"\n force_parent = self.config['force'].get(bool)\n\n for item in task.imported_items():\n self.find_work(item, force_parent)\n item.store()\n\n def get_info(self, item, work_info):\n \"\"\"Given the parent work info dict, fetch parent_composer,\n parent_composer_sort, parentwork, parentwork_disambig, mb_workid and\n composer_ids.\n \"\"\"\n\n parent_composer = []\n parent_composer_sort = []\n parentwork_info = {}\n\n composer_exists = False\n if 'artist-relation-list' in work_info['work']:\n for artist in work_info['work']['artist-relation-list']:\n if artist['type'] == 'composer':\n composer_exists = True\n parent_composer.append(artist['artist']['name'])\n parent_composer_sort.append(artist['artist']['sort-name'])\n if 'end' in artist.keys():\n parentwork_info[\"parentwork_date\"] = artist['end']\n\n parentwork_info['parent_composer'] = u', '.join(parent_composer)\n parentwork_info['parent_composer_sort'] = u', '.join(\n parent_composer_sort)\n\n if not composer_exists:\n self._log.debug(\n 'no composer for {}; add one at '\n 'https://musicbrainz.org/work/{}',\n item, work_info['work']['id'],\n )\n\n parentwork_info['parentwork'] = work_info['work']['title']\n parentwork_info['mb_parentworkid'] = work_info['work']['id']\n\n if 'disambiguation' in work_info['work']:\n parentwork_info['parentwork_disambig'] = work_info[\n 'work']['disambiguation']\n\n else:\n parentwork_info['parentwork_disambig'] = None\n\n return parentwork_info\n\n def find_work(self, item, force):\n \"\"\"Finds the parent work of a recording and populates the tags\n accordingly.\n\n The parent work is found recursively, by finding the direct parent\n repeatedly until there are no more links in the chain. 
We return the\n final, topmost work in the chain.\n\n Namely, the tags parentwork, parentwork_disambig, mb_parentworkid,\n parent_composer, parent_composer_sort and work_date are populated.\n \"\"\"\n\n if not item.mb_workid:\n self._log.info('No work for {}, \\\nadd one at https://musicbrainz.org/recording/{}', item, item.mb_trackid)\n return\n\n hasparent = hasattr(item, 'parentwork')\n work_changed = True\n if hasattr(item, 'parentwork_workid_current'):\n work_changed = item.parentwork_workid_current != item.mb_workid\n if force or not hasparent or work_changed:\n try:\n work_info, work_date = find_parentwork_info(item.mb_workid)\n except musicbrainzngs.musicbrainz.WebServiceError as e:\n self._log.debug(\"error fetching work: {}\", e)\n return\n parent_info = self.get_info(item, work_info)\n parent_info['parentwork_workid_current'] = item.mb_workid\n if 'parent_composer' in parent_info:\n self._log.debug(\"Work fetched: {} - {}\",\n parent_info['parentwork'],\n parent_info['parent_composer'])\n else:\n self._log.debug(\"Work fetched: {} - no parent composer\",\n parent_info['parentwork'])\n\n elif hasparent:\n self._log.debug(\"{}: Work present, skipping\", item)\n return\n\n # apply all non-null values to the item\n for key, value in parent_info.items():\n if value:\n item[key] = value\n\n if work_date:\n item['work_date'] = work_date\n return ui.show_model_changes(\n item, fields=['parentwork', 'parentwork_disambig',\n 'mb_parentworkid', 'parent_composer',\n 'parent_composer_sort', 'work_date',\n 'parentwork_workid_current', 'parentwork_date'])\n", "path": "beetsplug/parentwork.py"}]} | 2,665 | 210 |
gh_patches_debug_21044 | rasdani/github-patches | git_diff | bridgecrewio__checkov-5336 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sarif report creates invalid uri for folder with spaces
**Describe the issue**
SonarQube will not import the SARIF report from Checkov correctly because of an invalid URI in the SARIF output
1) Scan folders with spaces that have some issues
example:
Secrets/Access Tokens/Azure/main.tf
2) Output result as sarif
3) Resulting file is not valid SARIF due to invalid URI
The field Secrets/Access Tokens/Azure/main.tf corresponds to the results/locations/physicalLocation/artifactLocation/uri object in the SARIF report. There is a space character in the URI. This is not expected: the URI field shouldn’t contain any spaces.
This violates the URI specification, which forbids spaces in URIs.
Because of this problem, importing issues from directories with spaces will fail in SonarQube and possibly in other tools.
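For illustration, percent-encoding the file path (for example with Python's standard `urllib.parse.quote`) produces a spec-compliant URI; the snippet below is a minimal sketch and not part of the Checkov codebase:

```python
from urllib.parse import quote

path = "Secrets/Access Tokens/Azure/main.tf"
# quote() keeps "/" characters by default and replaces the space with %20,
# which yields a valid artifactLocation uri for the SARIF report.
print(quote(path))  # Secrets/Access%20Tokens/Azure/main.tf
```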
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `checkov/common/output/sarif.py`
Content:
```
1 from __future__ import annotations
2
3 import itertools
4 import json
5 from typing import TYPE_CHECKING, Any
6
7 from checkov.common.models.enums import CheckResult
8 from checkov.common.output.cyclonedx_consts import SCA_CHECKTYPES
9 from checkov.common.util.http_utils import valid_url
10 from checkov.version import version
11
12 if TYPE_CHECKING:
13 from checkov.common.output.record import Record
14 from checkov.common.output.report import Report
15
16 SEVERITY_TO_SARIF_LEVEL = {
17 "critical": "error",
18 "high": "error",
19 "medium": "warning",
20 "low": "note",
21 "none": "none",
22 }
23
24
25 SEVERITY_TO_SCORE = {
26 "critical": "10.0",
27 "high": "8.9",
28 "medium": "6.9",
29 "low": "3.9",
30 "none": "0.0",
31 }
32
33
34 class Sarif:
35 def __init__(self, reports: list[Report], tool: str | None) -> None:
36 self.reports = reports
37 self.rule_index_map: "dict[str, int]" = {}
38 self.tool = tool if tool else "Bridgecrew"
39
40 self.json = self.create_json()
41
42 def create_json(self) -> dict[str, Any]:
43 return {
44 "$schema": "https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json",
45 "version": "2.1.0",
46 "runs": self._create_runs(),
47 }
48
49 def _create_runs(self) -> list[dict[str, Any]]:
50 information_uri = "https://docs.bridgecrew.io" if self.tool.lower() == "bridgecrew" else "https://checkov.io"
51 rules = self._create_rules() # needs to be invoked before _create_results()
52 results = self._create_results()
53
54 return [
55 {
56 "tool": {
57 "driver": {
58 "name": self.tool,
59 "version": version,
60 "informationUri": information_uri,
61 "rules": rules,
62 "organization": "bridgecrew",
63 }
64 },
65 "results": results,
66 }
67 ]
68
69 def _create_rules(self) -> list[dict[str, Any]]:
70 rule_idx = 0
71 rules: "list[dict[str, Any]]" = []
72
73 for report in self.reports:
74 if report.check_type in SCA_CHECKTYPES:
75 for record in itertools.chain(report.failed_checks, report.skipped_checks):
76 rule = None
77 if record.check_id.startswith("BC_LIC"):
78 rule = self._create_license_rule(check_type=report.check_type, record=record)
79 elif record.check_id.startswith(("BC_VUL", "CKV_CVE")):
80 rule = self._create_cve_rule(check_type=report.check_type, record=record)
81
82 if rule and rule["id"] not in self.rule_index_map:
83 self.rule_index_map[rule["id"]] = rule_idx
84 rules.append(rule)
85 rule_idx += 1
86 else:
87 for record in itertools.chain(report.failed_checks, report.skipped_checks):
88 if record.check_id not in self.rule_index_map:
89 rule = self._create_iac_rule(check_type=report.check_type, record=record)
90 self.rule_index_map[rule["id"]] = rule_idx
91 rules.append(rule)
92 rule_idx += 1
93
94 return rules
95
96 def _create_iac_rule(self, check_type: str, record: Record) -> dict[str, Any]:
97 rule = {
98 "id": self._create_rule_id(check_type=check_type, record=record),
99 "name": record.short_description or record.check_name,
100 "shortDescription": {
101 "text": record.short_description or record.check_name,
102 },
103 "fullDescription": {
104 "text": record.description or record.check_name,
105 },
106 "help": {
107 "text": f"{record.check_name}\nResource: {record.resource}",
108 },
109 "defaultConfiguration": {"level": "error"},
110 }
111
112 # Adding 'properties' dictionary only if 'record.severity' exists
113 if record.severity:
114 rule["properties"] = {
115 "security-severity": SEVERITY_TO_SCORE.get(record.severity.name.lower(), "0.0"),
116 }
117
118 help_uri = record.guideline
119 if valid_url(help_uri):
120 rule["helpUri"] = help_uri
121
122 return rule
123
124 def _create_cve_rule(self, check_type: str, record: Record) -> dict[str, Any] | None:
125 details = record.vulnerability_details
126 if not details:
127 # this shouldn't happen
128 return None
129
130 rule = {
131 "id": self._create_rule_id(check_type=check_type, record=record),
132 "name": record.short_description or record.check_name,
133 "shortDescription": {
134 "text": record.short_description or record.check_name,
135 },
136 "fullDescription": {
137 "text": record.description or record.check_name,
138 },
139 "help": {
140 "text": f"{record.check_name}\nResource: {record.resource}\nStatus: {details.get('status')}",
141 },
142 "defaultConfiguration": {"level": "error"},
143 }
144
145 # Add properties dictionary with security-severity
146 cvss = details.get("cvss")
147 if cvss:
148 # use CVSS, if exists
149 rule["properties"] = {
150 "security-severity": str(cvss),
151 }
152 elif record.severity:
153 # otherwise severity, if exists
154 rule["properties"] = {
155 "security-severity": SEVERITY_TO_SCORE.get(record.severity.name.lower(), "0.0"),
156 }
157
158 help_uri = details.get("link")
159 if valid_url(help_uri):
160 rule["helpUri"] = help_uri
161
162 return rule
163
164 def _create_license_rule(self, check_type: str, record: Record) -> dict[str, Any] | None:
165 details = record.vulnerability_details
166 if not details:
167 # this shouldn't happen
168 return None
169
170 rule = {
171 "id": self._create_rule_id(check_type=check_type, record=record),
172 "name": record.short_description or record.check_name,
173 "shortDescription": {
174 "text": record.short_description or record.check_name,
175 },
176 "fullDescription": {
177 "text": f"Package {details['package_name']}@{details['package_version']} has license {details['license']}",
178 },
179 "help": {
180 "text": f"{record.check_name}\nResource: {record.resource}",
181 },
182 "defaultConfiguration": {"level": "error"},
183 }
184
185 # Adding 'properties' dictionary only if 'record.severity' exists
186 if record.severity:
187 rule["properties"] = {
188 "security-severity": SEVERITY_TO_SCORE.get(record.severity.name.lower(), "0.0"),
189 }
190
191 help_uri = record.guideline
192 if valid_url(help_uri):
193 rule["helpUri"] = help_uri
194
195 return rule
196
197 def _create_results(self) -> list[dict[str, Any]]:
198 results: "list[dict[str, Any]]" = []
199
200 for report in self.reports:
201 for record in itertools.chain(report.failed_checks, report.skipped_checks):
202 level = "warning"
203 if record.severity:
204 level = SEVERITY_TO_SARIF_LEVEL.get(record.severity.name.lower(), "none")
205 elif record.check_result.get("result") == CheckResult.FAILED:
206 level = "error"
207
208 rule_id = self._create_rule_id(check_type=report.check_type, record=record)
209 if not rule_id or rule_id not in self.rule_index_map:
210 # can happen if data is missing
211 continue
212
213 result = {
214 "ruleId": rule_id,
215 "ruleIndex": self.rule_index_map[rule_id],
216 "level": level,
217 "attachments": [{"description": detail} for detail in record.details],
218 "message": {
219 "text": record.short_description or record.check_name,
220 },
221 "locations": [
222 {
223 "physicalLocation": {
224 "artifactLocation": {"uri": record.repo_file_path.lstrip("/")},
225 "region": {
226 "startLine": int(record.file_line_range[0]) or 1,
227 "endLine": int(record.file_line_range[1]) or 1,
228 "snippet": {"text": "".join(line for _, line in record.code_block)},
229 },
230 }
231 }
232 ],
233 }
234
235 if record.check_result.get("result") == CheckResult.SKIPPED:
236 # sca_package suppression can only be enabled via flag
237 # other runners only report in source suppression
238 kind = "external" if record.vulnerability_details else "inSource"
239 justification = record.check_result.get("suppress_comment")
240 if justification is None:
241 justification = "No comment provided"
242
243 result["suppressions"] = [
244 {
245 "kind": kind,
246 "justification": justification,
247 }
248 ]
249
250 results.append(result)
251
252 return results
253
254 def _create_rule_id(self, check_type: str, record: Record) -> str | None:
255 if check_type in SCA_CHECKTYPES:
256 details = record.vulnerability_details
257 if not details:
258 # this shouldn't happen
259 return None
260
261 if record.check_id.startswith("BC_LIC"):
262 return f"{details['license']}_{details['package_name']}@{details['package_version']}".replace(" ", "_")
263 elif record.check_id.startswith(("BC_VUL", "CKV_CVE")):
264 return f"{details['id']}_{details['package_name']}@{details['package_version']}".replace(" ", "_")
265 else:
266 return record.check_id
267
268 return None
269
270 def write_sarif_output(self) -> None:
271 try:
272 with open("results.sarif", "w") as f:
273 f.write(json.dumps(self.json))
274 print("\nWrote output in SARIF format to the file 'results.sarif'")
275 except EnvironmentError as e:
276 print("\nAn error occurred while writing SARIF results to file: results.sarif")
277 print(f"More details: \n {e}")
278
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/checkov/common/output/sarif.py b/checkov/common/output/sarif.py
--- a/checkov/common/output/sarif.py
+++ b/checkov/common/output/sarif.py
@@ -3,6 +3,7 @@
import itertools
import json
from typing import TYPE_CHECKING, Any
+from urllib.parse import quote
from checkov.common.models.enums import CheckResult
from checkov.common.output.cyclonedx_consts import SCA_CHECKTYPES
@@ -221,7 +222,7 @@
"locations": [
{
"physicalLocation": {
- "artifactLocation": {"uri": record.repo_file_path.lstrip("/")},
+ "artifactLocation": {"uri": quote(record.repo_file_path.lstrip("/"))},
"region": {
"startLine": int(record.file_line_range[0]) or 1,
"endLine": int(record.file_line_range[1]) or 1,
| {"golden_diff": "diff --git a/checkov/common/output/sarif.py b/checkov/common/output/sarif.py\n--- a/checkov/common/output/sarif.py\n+++ b/checkov/common/output/sarif.py\n@@ -3,6 +3,7 @@\n import itertools\n import json\n from typing import TYPE_CHECKING, Any\n+from urllib.parse import quote\n \n from checkov.common.models.enums import CheckResult\n from checkov.common.output.cyclonedx_consts import SCA_CHECKTYPES\n@@ -221,7 +222,7 @@\n \"locations\": [\n {\n \"physicalLocation\": {\n- \"artifactLocation\": {\"uri\": record.repo_file_path.lstrip(\"/\")},\n+ \"artifactLocation\": {\"uri\": quote(record.repo_file_path.lstrip(\"/\"))},\n \"region\": {\n \"startLine\": int(record.file_line_range[0]) or 1,\n \"endLine\": int(record.file_line_range[1]) or 1,\n", "issue": "Sarif report creates invalid uri for folder with spaces\n**Describe the issue**\r\nSonarQube will not import SARIF report from Checkov correctly because of invalid URI in SARIF\r\n\r\n1) Scan folders with spaces that has some issues\r\nexample:\r\nSecrets/Access Tokens/Azure/main.tf \r\n\r\n2) Output result as sarif\r\n3) Resulting file is not valid SARIF due to invalid URI\r\n\r\nThe field Secrets/Access Tokens/Azure/main.tf corresponds to the results/locations/physicalLocation/artifactLocation/uri object in the SARIF report. There is character the space in the URI. This is not expected. The URI field shouldn\u2019t have any spaces.\r\nThis is against specification of URI, which forbids spaces in URIs.\r\n\r\n\r\nBecause of this problem , import of issues in directories with spaces will fail in SonarQube and possibly other tools\n", "before_files": [{"content": "from __future__ import annotations\n\nimport itertools\nimport json\nfrom typing import TYPE_CHECKING, Any\n\nfrom checkov.common.models.enums import CheckResult\nfrom checkov.common.output.cyclonedx_consts import SCA_CHECKTYPES\nfrom checkov.common.util.http_utils import valid_url\nfrom checkov.version import version\n\nif TYPE_CHECKING:\n from checkov.common.output.record import Record\n from checkov.common.output.report import Report\n\nSEVERITY_TO_SARIF_LEVEL = {\n \"critical\": \"error\",\n \"high\": \"error\",\n \"medium\": \"warning\",\n \"low\": \"note\",\n \"none\": \"none\",\n}\n\n\nSEVERITY_TO_SCORE = {\n \"critical\": \"10.0\",\n \"high\": \"8.9\",\n \"medium\": \"6.9\",\n \"low\": \"3.9\",\n \"none\": \"0.0\",\n}\n\n\nclass Sarif:\n def __init__(self, reports: list[Report], tool: str | None) -> None:\n self.reports = reports\n self.rule_index_map: \"dict[str, int]\" = {}\n self.tool = tool if tool else \"Bridgecrew\"\n\n self.json = self.create_json()\n\n def create_json(self) -> dict[str, Any]:\n return {\n \"$schema\": \"https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json\",\n \"version\": \"2.1.0\",\n \"runs\": self._create_runs(),\n }\n\n def _create_runs(self) -> list[dict[str, Any]]:\n information_uri = \"https://docs.bridgecrew.io\" if self.tool.lower() == \"bridgecrew\" else \"https://checkov.io\"\n rules = self._create_rules() # needs to be invoked before _create_results()\n results = self._create_results()\n\n return [\n {\n \"tool\": {\n \"driver\": {\n \"name\": self.tool,\n \"version\": version,\n \"informationUri\": information_uri,\n \"rules\": rules,\n \"organization\": \"bridgecrew\",\n }\n },\n \"results\": results,\n }\n ]\n\n def _create_rules(self) -> list[dict[str, Any]]:\n rule_idx = 0\n rules: \"list[dict[str, Any]]\" = []\n\n for report in self.reports:\n if report.check_type in 
SCA_CHECKTYPES:\n for record in itertools.chain(report.failed_checks, report.skipped_checks):\n rule = None\n if record.check_id.startswith(\"BC_LIC\"):\n rule = self._create_license_rule(check_type=report.check_type, record=record)\n elif record.check_id.startswith((\"BC_VUL\", \"CKV_CVE\")):\n rule = self._create_cve_rule(check_type=report.check_type, record=record)\n\n if rule and rule[\"id\"] not in self.rule_index_map:\n self.rule_index_map[rule[\"id\"]] = rule_idx\n rules.append(rule)\n rule_idx += 1\n else:\n for record in itertools.chain(report.failed_checks, report.skipped_checks):\n if record.check_id not in self.rule_index_map:\n rule = self._create_iac_rule(check_type=report.check_type, record=record)\n self.rule_index_map[rule[\"id\"]] = rule_idx\n rules.append(rule)\n rule_idx += 1\n\n return rules\n\n def _create_iac_rule(self, check_type: str, record: Record) -> dict[str, Any]:\n rule = {\n \"id\": self._create_rule_id(check_type=check_type, record=record),\n \"name\": record.short_description or record.check_name,\n \"shortDescription\": {\n \"text\": record.short_description or record.check_name,\n },\n \"fullDescription\": {\n \"text\": record.description or record.check_name,\n },\n \"help\": {\n \"text\": f\"{record.check_name}\\nResource: {record.resource}\",\n },\n \"defaultConfiguration\": {\"level\": \"error\"},\n }\n\n # Adding 'properties' dictionary only if 'record.severity' exists\n if record.severity:\n rule[\"properties\"] = {\n \"security-severity\": SEVERITY_TO_SCORE.get(record.severity.name.lower(), \"0.0\"),\n }\n\n help_uri = record.guideline\n if valid_url(help_uri):\n rule[\"helpUri\"] = help_uri\n\n return rule\n\n def _create_cve_rule(self, check_type: str, record: Record) -> dict[str, Any] | None:\n details = record.vulnerability_details\n if not details:\n # this shouldn't happen\n return None\n\n rule = {\n \"id\": self._create_rule_id(check_type=check_type, record=record),\n \"name\": record.short_description or record.check_name,\n \"shortDescription\": {\n \"text\": record.short_description or record.check_name,\n },\n \"fullDescription\": {\n \"text\": record.description or record.check_name,\n },\n \"help\": {\n \"text\": f\"{record.check_name}\\nResource: {record.resource}\\nStatus: {details.get('status')}\",\n },\n \"defaultConfiguration\": {\"level\": \"error\"},\n }\n\n # Add properties dictionary with security-severity\n cvss = details.get(\"cvss\")\n if cvss:\n # use CVSS, if exists\n rule[\"properties\"] = {\n \"security-severity\": str(cvss),\n }\n elif record.severity:\n # otherwise severity, if exists\n rule[\"properties\"] = {\n \"security-severity\": SEVERITY_TO_SCORE.get(record.severity.name.lower(), \"0.0\"),\n }\n\n help_uri = details.get(\"link\")\n if valid_url(help_uri):\n rule[\"helpUri\"] = help_uri\n\n return rule\n\n def _create_license_rule(self, check_type: str, record: Record) -> dict[str, Any] | None:\n details = record.vulnerability_details\n if not details:\n # this shouldn't happen\n return None\n\n rule = {\n \"id\": self._create_rule_id(check_type=check_type, record=record),\n \"name\": record.short_description or record.check_name,\n \"shortDescription\": {\n \"text\": record.short_description or record.check_name,\n },\n \"fullDescription\": {\n \"text\": f\"Package {details['package_name']}@{details['package_version']} has license {details['license']}\",\n },\n \"help\": {\n \"text\": f\"{record.check_name}\\nResource: {record.resource}\",\n },\n \"defaultConfiguration\": {\"level\": \"error\"},\n }\n\n # 
Adding 'properties' dictionary only if 'record.severity' exists\n if record.severity:\n rule[\"properties\"] = {\n \"security-severity\": SEVERITY_TO_SCORE.get(record.severity.name.lower(), \"0.0\"),\n }\n\n help_uri = record.guideline\n if valid_url(help_uri):\n rule[\"helpUri\"] = help_uri\n\n return rule\n\n def _create_results(self) -> list[dict[str, Any]]:\n results: \"list[dict[str, Any]]\" = []\n\n for report in self.reports:\n for record in itertools.chain(report.failed_checks, report.skipped_checks):\n level = \"warning\"\n if record.severity:\n level = SEVERITY_TO_SARIF_LEVEL.get(record.severity.name.lower(), \"none\")\n elif record.check_result.get(\"result\") == CheckResult.FAILED:\n level = \"error\"\n\n rule_id = self._create_rule_id(check_type=report.check_type, record=record)\n if not rule_id or rule_id not in self.rule_index_map:\n # can happen if data is missing\n continue\n\n result = {\n \"ruleId\": rule_id,\n \"ruleIndex\": self.rule_index_map[rule_id],\n \"level\": level,\n \"attachments\": [{\"description\": detail} for detail in record.details],\n \"message\": {\n \"text\": record.short_description or record.check_name,\n },\n \"locations\": [\n {\n \"physicalLocation\": {\n \"artifactLocation\": {\"uri\": record.repo_file_path.lstrip(\"/\")},\n \"region\": {\n \"startLine\": int(record.file_line_range[0]) or 1,\n \"endLine\": int(record.file_line_range[1]) or 1,\n \"snippet\": {\"text\": \"\".join(line for _, line in record.code_block)},\n },\n }\n }\n ],\n }\n\n if record.check_result.get(\"result\") == CheckResult.SKIPPED:\n # sca_package suppression can only be enabled via flag\n # other runners only report in source suppression\n kind = \"external\" if record.vulnerability_details else \"inSource\"\n justification = record.check_result.get(\"suppress_comment\")\n if justification is None:\n justification = \"No comment provided\"\n\n result[\"suppressions\"] = [\n {\n \"kind\": kind,\n \"justification\": justification,\n }\n ]\n\n results.append(result)\n\n return results\n\n def _create_rule_id(self, check_type: str, record: Record) -> str | None:\n if check_type in SCA_CHECKTYPES:\n details = record.vulnerability_details\n if not details:\n # this shouldn't happen\n return None\n\n if record.check_id.startswith(\"BC_LIC\"):\n return f\"{details['license']}_{details['package_name']}@{details['package_version']}\".replace(\" \", \"_\")\n elif record.check_id.startswith((\"BC_VUL\", \"CKV_CVE\")):\n return f\"{details['id']}_{details['package_name']}@{details['package_version']}\".replace(\" \", \"_\")\n else:\n return record.check_id\n\n return None\n\n def write_sarif_output(self) -> None:\n try:\n with open(\"results.sarif\", \"w\") as f:\n f.write(json.dumps(self.json))\n print(\"\\nWrote output in SARIF format to the file 'results.sarif'\")\n except EnvironmentError as e:\n print(\"\\nAn error occurred while writing SARIF results to file: results.sarif\")\n print(f\"More details: \\n {e}\")\n", "path": "checkov/common/output/sarif.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport itertools\nimport json\nfrom typing import TYPE_CHECKING, Any\nfrom urllib.parse import quote\n\nfrom checkov.common.models.enums import CheckResult\nfrom checkov.common.output.cyclonedx_consts import SCA_CHECKTYPES\nfrom checkov.common.util.http_utils import valid_url\nfrom checkov.version import version\n\nif TYPE_CHECKING:\n from checkov.common.output.record import Record\n from checkov.common.output.report import Report\n\nSEVERITY_TO_SARIF_LEVEL 
= {\n \"critical\": \"error\",\n \"high\": \"error\",\n \"medium\": \"warning\",\n \"low\": \"note\",\n \"none\": \"none\",\n}\n\n\nSEVERITY_TO_SCORE = {\n \"critical\": \"10.0\",\n \"high\": \"8.9\",\n \"medium\": \"6.9\",\n \"low\": \"3.9\",\n \"none\": \"0.0\",\n}\n\n\nclass Sarif:\n def __init__(self, reports: list[Report], tool: str | None) -> None:\n self.reports = reports\n self.rule_index_map: \"dict[str, int]\" = {}\n self.tool = tool if tool else \"Bridgecrew\"\n\n self.json = self.create_json()\n\n def create_json(self) -> dict[str, Any]:\n return {\n \"$schema\": \"https://raw.githubusercontent.com/oasis-tcs/sarif-spec/master/Schemata/sarif-schema-2.1.0.json\",\n \"version\": \"2.1.0\",\n \"runs\": self._create_runs(),\n }\n\n def _create_runs(self) -> list[dict[str, Any]]:\n information_uri = \"https://docs.bridgecrew.io\" if self.tool.lower() == \"bridgecrew\" else \"https://checkov.io\"\n rules = self._create_rules() # needs to be invoked before _create_results()\n results = self._create_results()\n\n return [\n {\n \"tool\": {\n \"driver\": {\n \"name\": self.tool,\n \"version\": version,\n \"informationUri\": information_uri,\n \"rules\": rules,\n \"organization\": \"bridgecrew\",\n }\n },\n \"results\": results,\n }\n ]\n\n def _create_rules(self) -> list[dict[str, Any]]:\n rule_idx = 0\n rules: \"list[dict[str, Any]]\" = []\n\n for report in self.reports:\n if report.check_type in SCA_CHECKTYPES:\n for record in itertools.chain(report.failed_checks, report.skipped_checks):\n rule = None\n if record.check_id.startswith(\"BC_LIC\"):\n rule = self._create_license_rule(check_type=report.check_type, record=record)\n elif record.check_id.startswith((\"BC_VUL\", \"CKV_CVE\")):\n rule = self._create_cve_rule(check_type=report.check_type, record=record)\n\n if rule and rule[\"id\"] not in self.rule_index_map:\n self.rule_index_map[rule[\"id\"]] = rule_idx\n rules.append(rule)\n rule_idx += 1\n else:\n for record in itertools.chain(report.failed_checks, report.skipped_checks):\n if record.check_id not in self.rule_index_map:\n rule = self._create_iac_rule(check_type=report.check_type, record=record)\n self.rule_index_map[rule[\"id\"]] = rule_idx\n rules.append(rule)\n rule_idx += 1\n\n return rules\n\n def _create_iac_rule(self, check_type: str, record: Record) -> dict[str, Any]:\n rule = {\n \"id\": self._create_rule_id(check_type=check_type, record=record),\n \"name\": record.short_description or record.check_name,\n \"shortDescription\": {\n \"text\": record.short_description or record.check_name,\n },\n \"fullDescription\": {\n \"text\": record.description or record.check_name,\n },\n \"help\": {\n \"text\": f\"{record.check_name}\\nResource: {record.resource}\",\n },\n \"defaultConfiguration\": {\"level\": \"error\"},\n }\n\n # Adding 'properties' dictionary only if 'record.severity' exists\n if record.severity:\n rule[\"properties\"] = {\n \"security-severity\": SEVERITY_TO_SCORE.get(record.severity.name.lower(), \"0.0\"),\n }\n\n help_uri = record.guideline\n if valid_url(help_uri):\n rule[\"helpUri\"] = help_uri\n\n return rule\n\n def _create_cve_rule(self, check_type: str, record: Record) -> dict[str, Any] | None:\n details = record.vulnerability_details\n if not details:\n # this shouldn't happen\n return None\n\n rule = {\n \"id\": self._create_rule_id(check_type=check_type, record=record),\n \"name\": record.short_description or record.check_name,\n \"shortDescription\": {\n \"text\": record.short_description or record.check_name,\n },\n \"fullDescription\": {\n 
\"text\": record.description or record.check_name,\n },\n \"help\": {\n \"text\": f\"{record.check_name}\\nResource: {record.resource}\\nStatus: {details.get('status')}\",\n },\n \"defaultConfiguration\": {\"level\": \"error\"},\n }\n\n # Add properties dictionary with security-severity\n cvss = details.get(\"cvss\")\n if cvss:\n # use CVSS, if exists\n rule[\"properties\"] = {\n \"security-severity\": str(cvss),\n }\n elif record.severity:\n # otherwise severity, if exists\n rule[\"properties\"] = {\n \"security-severity\": SEVERITY_TO_SCORE.get(record.severity.name.lower(), \"0.0\"),\n }\n\n help_uri = details.get(\"link\")\n if valid_url(help_uri):\n rule[\"helpUri\"] = help_uri\n\n return rule\n\n def _create_license_rule(self, check_type: str, record: Record) -> dict[str, Any] | None:\n details = record.vulnerability_details\n if not details:\n # this shouldn't happen\n return None\n\n rule = {\n \"id\": self._create_rule_id(check_type=check_type, record=record),\n \"name\": record.short_description or record.check_name,\n \"shortDescription\": {\n \"text\": record.short_description or record.check_name,\n },\n \"fullDescription\": {\n \"text\": f\"Package {details['package_name']}@{details['package_version']} has license {details['license']}\",\n },\n \"help\": {\n \"text\": f\"{record.check_name}\\nResource: {record.resource}\",\n },\n \"defaultConfiguration\": {\"level\": \"error\"},\n }\n\n # Adding 'properties' dictionary only if 'record.severity' exists\n if record.severity:\n rule[\"properties\"] = {\n \"security-severity\": SEVERITY_TO_SCORE.get(record.severity.name.lower(), \"0.0\"),\n }\n\n help_uri = record.guideline\n if valid_url(help_uri):\n rule[\"helpUri\"] = help_uri\n\n return rule\n\n def _create_results(self) -> list[dict[str, Any]]:\n results: \"list[dict[str, Any]]\" = []\n\n for report in self.reports:\n for record in itertools.chain(report.failed_checks, report.skipped_checks):\n level = \"warning\"\n if record.severity:\n level = SEVERITY_TO_SARIF_LEVEL.get(record.severity.name.lower(), \"none\")\n elif record.check_result.get(\"result\") == CheckResult.FAILED:\n level = \"error\"\n\n rule_id = self._create_rule_id(check_type=report.check_type, record=record)\n if not rule_id or rule_id not in self.rule_index_map:\n # can happen if data is missing\n continue\n\n result = {\n \"ruleId\": rule_id,\n \"ruleIndex\": self.rule_index_map[rule_id],\n \"level\": level,\n \"attachments\": [{\"description\": detail} for detail in record.details],\n \"message\": {\n \"text\": record.short_description or record.check_name,\n },\n \"locations\": [\n {\n \"physicalLocation\": {\n \"artifactLocation\": {\"uri\": quote(record.repo_file_path.lstrip(\"/\"))},\n \"region\": {\n \"startLine\": int(record.file_line_range[0]) or 1,\n \"endLine\": int(record.file_line_range[1]) or 1,\n \"snippet\": {\"text\": \"\".join(line for _, line in record.code_block)},\n },\n }\n }\n ],\n }\n\n if record.check_result.get(\"result\") == CheckResult.SKIPPED:\n # sca_package suppression can only be enabled via flag\n # other runners only report in source suppression\n kind = \"external\" if record.vulnerability_details else \"inSource\"\n justification = record.check_result.get(\"suppress_comment\")\n if justification is None:\n justification = \"No comment provided\"\n\n result[\"suppressions\"] = [\n {\n \"kind\": kind,\n \"justification\": justification,\n }\n ]\n\n results.append(result)\n\n return results\n\n def _create_rule_id(self, check_type: str, record: Record) -> str | None:\n if 
check_type in SCA_CHECKTYPES:\n details = record.vulnerability_details\n if not details:\n # this shouldn't happen\n return None\n\n if record.check_id.startswith(\"BC_LIC\"):\n return f\"{details['license']}_{details['package_name']}@{details['package_version']}\".replace(\" \", \"_\")\n elif record.check_id.startswith((\"BC_VUL\", \"CKV_CVE\")):\n return f\"{details['id']}_{details['package_name']}@{details['package_version']}\".replace(\" \", \"_\")\n else:\n return record.check_id\n\n return None\n\n def write_sarif_output(self) -> None:\n try:\n with open(\"results.sarif\", \"w\") as f:\n f.write(json.dumps(self.json))\n print(\"\\nWrote output in SARIF format to the file 'results.sarif'\")\n except EnvironmentError as e:\n print(\"\\nAn error occurred while writing SARIF results to file: results.sarif\")\n print(f\"More details: \\n {e}\")\n", "path": "checkov/common/output/sarif.py"}]} | 3,401 | 204 |
gh_patches_debug_14983 | rasdani/github-patches | git_diff | saleor__saleor-5302 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The clear database command should be runnable with debug disabled
We should be able to run `cleardb` when `DEBUG=False`, but because it is a dangerous command we should require a `--force` flag to explicitly allow that action when debug mode is turned off.
Definition of done:
- Prints an error to stderr when `DEBUG=False` and `--force` is not passed (flagged)
- Exits with 1 (raises `SystemExit`, which allows Django to handle it and clean up the open connections, such as the database connection)
- User can clear the database when debug mode is turned off only if `--force` was passed (a minimal sketch of the intended check follows)
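A minimal sketch of the intended check, assuming the standard Django management-command structure (the flag wiring and the error message are illustrative, not the final implementation):

```python
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError


class Command(BaseCommand):
    help = "Removes data from the database preserving shop configuration."

    def add_arguments(self, parser):
        parser.add_argument(
            "--force",
            action="store_true",
            help="Allow clearing the database when DEBUG is disabled.",
        )

    def handle(self, **options):
        if not settings.DEBUG and not options.get("force", False):
            # Django writes a CommandError to stderr and exits with status 1,
            # which lets it clean up open connections such as the database.
            raise CommandError("Cannot clear the database in DEBUG=False mode.")
        # ... proceed with the actual deletions ...
```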
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `saleor/core/management/commands/cleardb.py`
Content:
```
1 """Clear the database preserving shop's configuration.
2
3 This command clears the database from data such as orders, products or customer
4 accounts. It doesn't remove shop's configuration, such as: staff accounts, service
5 accounts, plugin configurations, site settings or navigation menus.
6 """
7
8 from django.conf import settings
9 from django.core.management.base import BaseCommand, CommandError
10 from django.db.models import Q
11
12 from ....account.models import User
13 from ....checkout.models import Checkout
14 from ....discount.models import Sale, Voucher
15 from ....giftcard.models import GiftCard
16 from ....order.models import Order
17 from ....page.models import Page
18 from ....payment.models import Payment, Transaction
19 from ....product.models import Attribute, Category, Collection, Product, ProductType
20 from ....shipping.models import ShippingMethod, ShippingZone
21 from ....warehouse.models import Warehouse
22 from ....webhook.models import Webhook
23
24
25 class Command(BaseCommand):
26 help = "Removes data from the database preserving shop configuration."
27
28 def add_arguments(self, parser):
29 parser.add_argument(
30 "--delete-staff",
31 action="store_true",
32 help="Delete staff user accounts (doesn't delete superuser accounts).",
33 )
34
35 def handle(self, **options):
36 if not settings.DEBUG:
37 raise CommandError("Cannot clear the database in DEBUG=True mode.")
38
39 Checkout.objects.all().delete()
40 self.stdout.write("Removed checkouts")
41
42 Transaction.objects.all().delete()
43 self.stdout.write("Removed transactions")
44
45 Payment.objects.all().delete()
46 self.stdout.write("Removed payments")
47
48 Order.objects.all().delete()
49 self.stdout.write("Removed orders")
50
51 Product.objects.all().delete()
52 self.stdout.write("Removed products")
53
54 ProductType.objects.all().delete()
55 self.stdout.write("Removed product types")
56
57 Attribute.objects.all().delete()
58 self.stdout.write("Removed attributes")
59
60 Category.objects.all().delete()
61 self.stdout.write("Removed categories")
62
63 Collection.objects.all().delete()
64 self.stdout.write("Removed collections")
65
66 Sale.objects.all().delete()
67 self.stdout.write("Removed sales")
68
69 ShippingMethod.objects.all().delete()
70 self.stdout.write("Removed shipping methods")
71
72 ShippingZone.objects.all().delete()
73 self.stdout.write("Removed shipping zones")
74
75 Voucher.objects.all().delete()
76 self.stdout.write("Removed vouchers")
77
78 GiftCard.objects.all().delete()
79 self.stdout.write("Removed gift cards")
80
81 self.stdout.write("Removed warehouses")
82 Warehouse.objects.all().delete()
83
84 Page.objects.all().delete()
85 self.stdout.write("Removed pages")
86
87 Webhook.objects.all().delete()
88 self.stdout.write("Removed webhooks")
89
90 # Delete all users except for staff members.
91 staff = User.objects.filter(Q(is_staff=True) | Q(is_superuser=True))
92 User.objects.exclude(pk__in=staff).delete()
93 self.stdout.write("Removed customers")
94
95 should_delete_staff = options.get("delete_staff")
96 if should_delete_staff:
97 staff = staff.exclude(is_superuser=True)
98 staff.delete()
99 self.stdout.write("Removed staff users")
100
101 # Remove addresses of staff members. Used to clear saved addresses of staff
102 # accounts used on demo for testing checkout.
103 for user in staff:
104 user.addresses.all().delete()
105 self.stdout.write("Removed staff addresses")
106
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/saleor/core/management/commands/cleardb.py b/saleor/core/management/commands/cleardb.py
--- a/saleor/core/management/commands/cleardb.py
+++ b/saleor/core/management/commands/cleardb.py
@@ -31,10 +31,16 @@
action="store_true",
help="Delete staff user accounts (doesn't delete superuser accounts).",
)
+ parser.add_argument(
+ "--force",
+ action="store_true",
+ help="Allows running the cleardb command in DEBUG=False mode.",
+ )
def handle(self, **options):
- if not settings.DEBUG:
- raise CommandError("Cannot clear the database in DEBUG=True mode.")
+ force = options.get("force", False)
+ if not settings.DEBUG and not force:
+ raise CommandError("Cannot clear the database in DEBUG=False mode.")
Checkout.objects.all().delete()
self.stdout.write("Removed checkouts")
| {"golden_diff": "diff --git a/saleor/core/management/commands/cleardb.py b/saleor/core/management/commands/cleardb.py\n--- a/saleor/core/management/commands/cleardb.py\n+++ b/saleor/core/management/commands/cleardb.py\n@@ -31,10 +31,16 @@\n action=\"store_true\",\n help=\"Delete staff user accounts (doesn't delete superuser accounts).\",\n )\n+ parser.add_argument(\n+ \"--force\",\n+ action=\"store_true\",\n+ help=\"Allows running the cleardb command in DEBUG=False mode.\",\n+ )\n \n def handle(self, **options):\n- if not settings.DEBUG:\n- raise CommandError(\"Cannot clear the database in DEBUG=True mode.\")\n+ force = options.get(\"force\", False)\n+ if not settings.DEBUG and not force:\n+ raise CommandError(\"Cannot clear the database in DEBUG=False mode.\")\n \n Checkout.objects.all().delete()\n self.stdout.write(\"Removed checkouts\")\n", "issue": "The clear database command should be runnable with debug disabled\nWe should be able to run `cleardb` when `DEBUG=False` but we should have a `--force` flag to actually allow that action when the debug mode is turned off as it is a dangerous command.\r\n\r\nDefinition of done:\r\n- Prints an error to stderr when `DEBUG=False` and `--force` is not passed (flagged)\r\n- Exits with 1 (raises `SystemExit` which allows Django to handle it and cleanup the opened connections, such as the database)\r\n- User can clear the database when debug mode is turned off and only when `--force` was passed\n", "before_files": [{"content": "\"\"\"Clear the database preserving shop's configuration.\n\nThis command clears the database from data such as orders, products or customer\naccounts. It doesn't remove shop's configuration, such as: staff accounts, service\naccounts, plugin configurations, site settings or navigation menus.\n\"\"\"\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.db.models import Q\n\nfrom ....account.models import User\nfrom ....checkout.models import Checkout\nfrom ....discount.models import Sale, Voucher\nfrom ....giftcard.models import GiftCard\nfrom ....order.models import Order\nfrom ....page.models import Page\nfrom ....payment.models import Payment, Transaction\nfrom ....product.models import Attribute, Category, Collection, Product, ProductType\nfrom ....shipping.models import ShippingMethod, ShippingZone\nfrom ....warehouse.models import Warehouse\nfrom ....webhook.models import Webhook\n\n\nclass Command(BaseCommand):\n help = \"Removes data from the database preserving shop configuration.\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--delete-staff\",\n action=\"store_true\",\n help=\"Delete staff user accounts (doesn't delete superuser accounts).\",\n )\n\n def handle(self, **options):\n if not settings.DEBUG:\n raise CommandError(\"Cannot clear the database in DEBUG=True mode.\")\n\n Checkout.objects.all().delete()\n self.stdout.write(\"Removed checkouts\")\n\n Transaction.objects.all().delete()\n self.stdout.write(\"Removed transactions\")\n\n Payment.objects.all().delete()\n self.stdout.write(\"Removed payments\")\n\n Order.objects.all().delete()\n self.stdout.write(\"Removed orders\")\n\n Product.objects.all().delete()\n self.stdout.write(\"Removed products\")\n\n ProductType.objects.all().delete()\n self.stdout.write(\"Removed product types\")\n\n Attribute.objects.all().delete()\n self.stdout.write(\"Removed attributes\")\n\n Category.objects.all().delete()\n self.stdout.write(\"Removed categories\")\n\n 
Collection.objects.all().delete()\n self.stdout.write(\"Removed collections\")\n\n Sale.objects.all().delete()\n self.stdout.write(\"Removed sales\")\n\n ShippingMethod.objects.all().delete()\n self.stdout.write(\"Removed shipping methods\")\n\n ShippingZone.objects.all().delete()\n self.stdout.write(\"Removed shipping zones\")\n\n Voucher.objects.all().delete()\n self.stdout.write(\"Removed vouchers\")\n\n GiftCard.objects.all().delete()\n self.stdout.write(\"Removed gift cards\")\n\n self.stdout.write(\"Removed warehouses\")\n Warehouse.objects.all().delete()\n\n Page.objects.all().delete()\n self.stdout.write(\"Removed pages\")\n\n Webhook.objects.all().delete()\n self.stdout.write(\"Removed webhooks\")\n\n # Delete all users except for staff members.\n staff = User.objects.filter(Q(is_staff=True) | Q(is_superuser=True))\n User.objects.exclude(pk__in=staff).delete()\n self.stdout.write(\"Removed customers\")\n\n should_delete_staff = options.get(\"delete_staff\")\n if should_delete_staff:\n staff = staff.exclude(is_superuser=True)\n staff.delete()\n self.stdout.write(\"Removed staff users\")\n\n # Remove addresses of staff members. Used to clear saved addresses of staff\n # accounts used on demo for testing checkout.\n for user in staff:\n user.addresses.all().delete()\n self.stdout.write(\"Removed staff addresses\")\n", "path": "saleor/core/management/commands/cleardb.py"}], "after_files": [{"content": "\"\"\"Clear the database preserving shop's configuration.\n\nThis command clears the database from data such as orders, products or customer\naccounts. It doesn't remove shop's configuration, such as: staff accounts, service\naccounts, plugin configurations, site settings or navigation menus.\n\"\"\"\n\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.db.models import Q\n\nfrom ....account.models import User\nfrom ....checkout.models import Checkout\nfrom ....discount.models import Sale, Voucher\nfrom ....giftcard.models import GiftCard\nfrom ....order.models import Order\nfrom ....page.models import Page\nfrom ....payment.models import Payment, Transaction\nfrom ....product.models import Attribute, Category, Collection, Product, ProductType\nfrom ....shipping.models import ShippingMethod, ShippingZone\nfrom ....warehouse.models import Warehouse\nfrom ....webhook.models import Webhook\n\n\nclass Command(BaseCommand):\n help = \"Removes data from the database preserving shop configuration.\"\n\n def add_arguments(self, parser):\n parser.add_argument(\n \"--delete-staff\",\n action=\"store_true\",\n help=\"Delete staff user accounts (doesn't delete superuser accounts).\",\n )\n parser.add_argument(\n \"--force\",\n action=\"store_true\",\n help=\"Allows running the cleardb command in DEBUG=False mode.\",\n )\n\n def handle(self, **options):\n force = options.get(\"force\", False)\n if not settings.DEBUG and not force:\n raise CommandError(\"Cannot clear the database in DEBUG=False mode.\")\n\n Checkout.objects.all().delete()\n self.stdout.write(\"Removed checkouts\")\n\n Transaction.objects.all().delete()\n self.stdout.write(\"Removed transactions\")\n\n Payment.objects.all().delete()\n self.stdout.write(\"Removed payments\")\n\n Order.objects.all().delete()\n self.stdout.write(\"Removed orders\")\n\n Product.objects.all().delete()\n self.stdout.write(\"Removed products\")\n\n ProductType.objects.all().delete()\n self.stdout.write(\"Removed product types\")\n\n Attribute.objects.all().delete()\n self.stdout.write(\"Removed 
attributes\")\n\n Category.objects.all().delete()\n self.stdout.write(\"Removed categories\")\n\n Collection.objects.all().delete()\n self.stdout.write(\"Removed collections\")\n\n Sale.objects.all().delete()\n self.stdout.write(\"Removed sales\")\n\n ShippingMethod.objects.all().delete()\n self.stdout.write(\"Removed shipping methods\")\n\n ShippingZone.objects.all().delete()\n self.stdout.write(\"Removed shipping zones\")\n\n Voucher.objects.all().delete()\n self.stdout.write(\"Removed vouchers\")\n\n GiftCard.objects.all().delete()\n self.stdout.write(\"Removed gift cards\")\n\n self.stdout.write(\"Removed warehouses\")\n Warehouse.objects.all().delete()\n\n Page.objects.all().delete()\n self.stdout.write(\"Removed pages\")\n\n Webhook.objects.all().delete()\n self.stdout.write(\"Removed webhooks\")\n\n # Delete all users except for staff members.\n staff = User.objects.filter(Q(is_staff=True) | Q(is_superuser=True))\n User.objects.exclude(pk__in=staff).delete()\n self.stdout.write(\"Removed customers\")\n\n should_delete_staff = options.get(\"delete_staff\")\n if should_delete_staff:\n staff = staff.exclude(is_superuser=True)\n staff.delete()\n self.stdout.write(\"Removed staff users\")\n\n # Remove addresses of staff members. Used to clear saved addresses of staff\n # accounts used on demo for testing checkout.\n for user in staff:\n user.addresses.all().delete()\n self.stdout.write(\"Removed staff addresses\")\n", "path": "saleor/core/management/commands/cleardb.py"}]} | 1,307 | 223 |
gh_patches_debug_6825 | rasdani/github-patches | git_diff | microsoft__botbuilder-python-1579 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"no match response" button present in qna maker dialog when active learning is disabled
Python tracking issue for repo code-owners
See original issue for details: microsoft/botframework-sdk#6146
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `libraries/botbuilder-ai/botbuilder/ai/qna/utils/qna_card_builder.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License.
3
4 from typing import List
5 from botbuilder.core import CardFactory
6 from botbuilder.schema import Activity, ActivityTypes, CardAction, HeroCard
7
8 from ..models import QueryResult
9
10
11 class QnACardBuilder:
12 """
13 Message activity card builder for QnAMaker dialogs.
14 """
15
16 @staticmethod
17 def get_suggestions_card(
18 suggestions: List[str], card_title: str, card_no_match: str
19 ) -> Activity:
20 """
21 Get active learning suggestions card.
22 """
23
24 if not suggestions:
25 raise TypeError("suggestions list is required")
26
27 if not card_title:
28 raise TypeError("card_title is required")
29
30 if not card_no_match:
31 raise TypeError("card_no_match is required")
32
33 # Add all suggestions
34 button_list = [
35 CardAction(value=suggestion, type="imBack", title=suggestion)
36 for suggestion in suggestions
37 ]
38
39 # Add No match text
40 button_list.append(
41 CardAction(value=card_no_match, type="imBack", title=card_no_match)
42 )
43
44 attachment = CardFactory.hero_card(HeroCard(buttons=button_list))
45
46 return Activity(
47 type=ActivityTypes.message, text=card_title, attachments=[attachment]
48 )
49
50 @staticmethod
51 def get_qna_prompts_card(result: QueryResult, card_no_match_text: str) -> Activity:
52 """
53 Get active learning suggestions card.
54 """
55
56 if not result:
57 raise TypeError("result is required")
58
59 if not card_no_match_text:
60 raise TypeError("card_no_match_text is required")
61
62 # Add all prompts
63 button_list = [
64 CardAction(
65 value=prompt.display_text, type="imBack", title=prompt.display_text,
66 )
67 for prompt in result.context.prompts
68 ]
69
70 # Add No match text
71 button_list.append(
72 CardAction(
73 value=card_no_match_text, type="imBack", title=card_no_match_text,
74 )
75 )
76
77 attachment = CardFactory.hero_card(HeroCard(buttons=button_list))
78
79 return Activity(
80 type=ActivityTypes.message, text=result.answer, attachments=[attachment]
81 )
82
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/libraries/botbuilder-ai/botbuilder/ai/qna/utils/qna_card_builder.py b/libraries/botbuilder-ai/botbuilder/ai/qna/utils/qna_card_builder.py
--- a/libraries/botbuilder-ai/botbuilder/ai/qna/utils/qna_card_builder.py
+++ b/libraries/botbuilder-ai/botbuilder/ai/qna/utils/qna_card_builder.py
@@ -67,13 +67,6 @@
for prompt in result.context.prompts
]
- # Add No match text
- button_list.append(
- CardAction(
- value=card_no_match_text, type="imBack", title=card_no_match_text,
- )
- )
-
attachment = CardFactory.hero_card(HeroCard(buttons=button_list))
return Activity(
| {"golden_diff": "diff --git a/libraries/botbuilder-ai/botbuilder/ai/qna/utils/qna_card_builder.py b/libraries/botbuilder-ai/botbuilder/ai/qna/utils/qna_card_builder.py\n--- a/libraries/botbuilder-ai/botbuilder/ai/qna/utils/qna_card_builder.py\n+++ b/libraries/botbuilder-ai/botbuilder/ai/qna/utils/qna_card_builder.py\n@@ -67,13 +67,6 @@\n for prompt in result.context.prompts\r\n ]\r\n \r\n- # Add No match text\r\n- button_list.append(\r\n- CardAction(\r\n- value=card_no_match_text, type=\"imBack\", title=card_no_match_text,\r\n- )\r\n- )\r\n-\r\n attachment = CardFactory.hero_card(HeroCard(buttons=button_list))\r\n \r\n return Activity(\n", "issue": "\"no match response\" button present in qna maker dialog when active learning is disabled\nPython tracking issue for repo code-owners\r\n\r\nSee original issue for details: microsoft/botframework-sdk#6146\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nfrom typing import List\r\nfrom botbuilder.core import CardFactory\r\nfrom botbuilder.schema import Activity, ActivityTypes, CardAction, HeroCard\r\n\r\nfrom ..models import QueryResult\r\n\r\n\r\nclass QnACardBuilder:\r\n \"\"\"\r\n Message activity card builder for QnAMaker dialogs.\r\n \"\"\"\r\n\r\n @staticmethod\r\n def get_suggestions_card(\r\n suggestions: List[str], card_title: str, card_no_match: str\r\n ) -> Activity:\r\n \"\"\"\r\n Get active learning suggestions card.\r\n \"\"\"\r\n\r\n if not suggestions:\r\n raise TypeError(\"suggestions list is required\")\r\n\r\n if not card_title:\r\n raise TypeError(\"card_title is required\")\r\n\r\n if not card_no_match:\r\n raise TypeError(\"card_no_match is required\")\r\n\r\n # Add all suggestions\r\n button_list = [\r\n CardAction(value=suggestion, type=\"imBack\", title=suggestion)\r\n for suggestion in suggestions\r\n ]\r\n\r\n # Add No match text\r\n button_list.append(\r\n CardAction(value=card_no_match, type=\"imBack\", title=card_no_match)\r\n )\r\n\r\n attachment = CardFactory.hero_card(HeroCard(buttons=button_list))\r\n\r\n return Activity(\r\n type=ActivityTypes.message, text=card_title, attachments=[attachment]\r\n )\r\n\r\n @staticmethod\r\n def get_qna_prompts_card(result: QueryResult, card_no_match_text: str) -> Activity:\r\n \"\"\"\r\n Get active learning suggestions card.\r\n \"\"\"\r\n\r\n if not result:\r\n raise TypeError(\"result is required\")\r\n\r\n if not card_no_match_text:\r\n raise TypeError(\"card_no_match_text is required\")\r\n\r\n # Add all prompts\r\n button_list = [\r\n CardAction(\r\n value=prompt.display_text, type=\"imBack\", title=prompt.display_text,\r\n )\r\n for prompt in result.context.prompts\r\n ]\r\n\r\n # Add No match text\r\n button_list.append(\r\n CardAction(\r\n value=card_no_match_text, type=\"imBack\", title=card_no_match_text,\r\n )\r\n )\r\n\r\n attachment = CardFactory.hero_card(HeroCard(buttons=button_list))\r\n\r\n return Activity(\r\n type=ActivityTypes.message, text=result.answer, attachments=[attachment]\r\n )\r\n", "path": "libraries/botbuilder-ai/botbuilder/ai/qna/utils/qna_card_builder.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. 
All rights reserved.\r\n# Licensed under the MIT License.\r\n\r\nfrom typing import List\r\nfrom botbuilder.core import CardFactory\r\nfrom botbuilder.schema import Activity, ActivityTypes, CardAction, HeroCard\r\n\r\nfrom ..models import QueryResult\r\n\r\n\r\nclass QnACardBuilder:\r\n \"\"\"\r\n Message activity card builder for QnAMaker dialogs.\r\n \"\"\"\r\n\r\n @staticmethod\r\n def get_suggestions_card(\r\n suggestions: List[str], card_title: str, card_no_match: str\r\n ) -> Activity:\r\n \"\"\"\r\n Get active learning suggestions card.\r\n \"\"\"\r\n\r\n if not suggestions:\r\n raise TypeError(\"suggestions list is required\")\r\n\r\n if not card_title:\r\n raise TypeError(\"card_title is required\")\r\n\r\n if not card_no_match:\r\n raise TypeError(\"card_no_match is required\")\r\n\r\n # Add all suggestions\r\n button_list = [\r\n CardAction(value=suggestion, type=\"imBack\", title=suggestion)\r\n for suggestion in suggestions\r\n ]\r\n\r\n # Add No match text\r\n button_list.append(\r\n CardAction(value=card_no_match, type=\"imBack\", title=card_no_match)\r\n )\r\n\r\n attachment = CardFactory.hero_card(HeroCard(buttons=button_list))\r\n\r\n return Activity(\r\n type=ActivityTypes.message, text=card_title, attachments=[attachment]\r\n )\r\n\r\n @staticmethod\r\n def get_qna_prompts_card(result: QueryResult, card_no_match_text: str) -> Activity:\r\n \"\"\"\r\n Get active learning suggestions card.\r\n \"\"\"\r\n\r\n if not result:\r\n raise TypeError(\"result is required\")\r\n\r\n if not card_no_match_text:\r\n raise TypeError(\"card_no_match_text is required\")\r\n\r\n # Add all prompts\r\n button_list = [\r\n CardAction(\r\n value=prompt.display_text, type=\"imBack\", title=prompt.display_text,\r\n )\r\n for prompt in result.context.prompts\r\n ]\r\n\r\n attachment = CardFactory.hero_card(HeroCard(buttons=button_list))\r\n\r\n return Activity(\r\n type=ActivityTypes.message, text=result.answer, attachments=[attachment]\r\n )\r\n", "path": "libraries/botbuilder-ai/botbuilder/ai/qna/utils/qna_card_builder.py"}]} | 960 | 185 |
gh_patches_debug_4001 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1218 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`leakfind` should default to `$sp`
The first argument to `leakfind` is required, but it should just default to `$sp` like `probeleak` does.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pwndbg/commands/leakfind.py`
Content:
```
1 """
2 Find a chain of leaks given some starting address.
3 """
4
5 import argparse
6 import queue
7
8 import gdb
9
10 import pwndbg.color.chain as C
11 import pwndbg.color.memory as M
12 import pwndbg.color.message as message
13 import pwndbg.commands
14 import pwndbg.vmmap
15 from pwndbg.chain import config_arrow_right
16
17
18 # Used to recursively print the pointer chain.
19 # addr is a pointer. It is taken to be a child pointer.
20 # visited_map is a map of children -> (parent,parent_start)
21 def get_rec_addr_string(addr, visited_map):
22 page = pwndbg.vmmap.find(addr)
23 arrow_right = C.arrow(" %s " % config_arrow_right)
24
25 if page is not None:
26 if addr not in visited_map:
27 return ""
28
29 parent_info = visited_map[addr]
30 parent = parent_info[0]
31 parent_base_addr = parent_info[1]
32 if parent - parent_base_addr < 0:
33 curText = hex(parent_base_addr) + hex(parent - parent_base_addr)
34 else:
35 curText = hex(parent_base_addr) + "+" + hex(parent - parent_base_addr)
36 if parent_base_addr == addr:
37 return ""
38 return (
39 get_rec_addr_string(parent_base_addr, visited_map)
40 + M.get(parent_base_addr, text=curText)
41 + arrow_right
42 )
43 else:
44 return ""
45
46
47 # Useful for debugging. Prints a map of child -> (parent, parent_start)
48 def dbg_print_map(maps):
49 for child, parent_info in maps.items():
50 print("0x%x + (0x%x, 0x%x)" % (child, parent_info[0], parent_info[1]))
51
52
53 parser = argparse.ArgumentParser()
54 parser.description = """
55 Attempt to find a leak chain given a starting address.
56 Scans memory near the given address, looks for pointers, and continues that process to attempt to find leaks.
57
58 Example: leakfind $rsp --page_name=filename --max_offset=0x48 --max_depth=6. This would look for any chains of leaks \
59 that point to a section in filename which begin near $rsp, are never 0x48 bytes further from a known pointer, \
60 and are a maximum length of 6.
61 """
62 parser.formatter_class = argparse.RawDescriptionHelpFormatter
63 parser.add_argument("address", help="Starting address to find a leak chain from")
64 parser.add_argument(
65 "-p",
66 "--page_name",
67 type=str,
68 nargs="?",
69 default=None,
70 help="Substring required to be part of the name of any found pages",
71 )
72 parser.add_argument(
73 "-o",
74 "--max_offset",
75 default=0x48,
76 nargs="?",
77 help="Max offset to add to addresses when looking for leak",
78 )
79 parser.add_argument(
80 "-d", "--max_depth", default=0x4, nargs="?", help="Maximum depth to follow pointers to"
81 )
82 parser.add_argument(
83 "-s",
84 "--step",
85 nargs="?",
86 default=0x1,
87 help="Step to add between pointers so they are considered. For example, if this is 4 it would only consider pointers at an offset divisible by 4 from the starting pointer",
88 )
89 parser.add_argument(
90 "--negative_offset",
91 nargs="?",
92 default=0x0,
93 help="Max negative offset to search before an address when looking for a leak",
94 )
95
96
97 @pwndbg.commands.ArgparsedCommand(parser)
98 @pwndbg.commands.OnlyWhenRunning
99 def leakfind(
100 address=None, page_name=None, max_offset=0x40, max_depth=0x4, step=0x1, negative_offset=0x0
101 ):
102 if address is None:
103 raise argparse.ArgumentTypeError("No starting address provided.")
104 foundPages = pwndbg.vmmap.find(address)
105
106 if not foundPages:
107 raise argparse.ArgumentTypeError("Starting address is not mapped.")
108
109 if not pwndbg.gdblib.memory.peek(address):
110 raise argparse.ArgumentTypeError("Unable to read from starting address.")
111
112 max_depth = int(max_depth)
113 # Just warn the user that a large depth might be slow.
114 # Probably worth checking offset^depth < threshold. Do this when more benchmarking is established.
115 if max_depth > 8:
116 print(message.warn("leakfind may take a while to run on larger depths."))
117
118 stride = int(step)
119 address = int(address)
120 max_offset = int(max_offset)
121 negative_offset = int(negative_offset)
122
123 # The below map stores a map of child address->(parent_address,parent_start_address)
124 # In the above tuple, parent_address is the exact address with a pointer to the child address.
125 # parent_start_address is an address that a previous address pointed to.
126 # We need to store both so that we can nicely create our leak chain.
127 visited_map = {}
128 visited_set = {int(address)}
129 address_queue = queue.Queue()
130 address_queue.put(int(address))
131 depth = 0
132 time_to_depth_increase = 0
133
134 # Run a bfs
135 # TODO look into performance gain from checking if an address is mapped before calling pwndbg.gdblib.memory.pvoid()
136 # TODO also check using pwndbg.gdblib.memory.read for possible performance boosts.
137 while address_queue.qsize() > 0 and depth < max_depth:
138 if time_to_depth_increase == 0:
139 depth = depth + 1
140 time_to_depth_increase = address_queue.qsize()
141 cur_start_addr = address_queue.get()
142 time_to_depth_increase -= 1
143 for cur_addr in range(
144 cur_start_addr - negative_offset, cur_start_addr + max_offset, stride
145 ):
146 try:
147 cur_addr &= pwndbg.gdblib.arch.ptrmask
148 result = int(pwndbg.gdblib.memory.pvoid(cur_addr))
149 if result in visited_map or result in visited_set:
150 continue
151 visited_map[result] = (
152 cur_addr,
153 cur_start_addr,
154 ) # map is of form child->(parent,parent_start)
155 address_queue.put(result)
156 visited_set.add(result)
157 except gdb.error:
158 # That means the memory was unmapped. Just skip it if we can't read it.
159 break
160
161 # A map of length->list of lines. Used to let us print in a somewhat nice manner.
162 output_map = {}
163 arrow_right = C.arrow(" %s " % config_arrow_right)
164
165 for child in visited_map:
166 child_page = pwndbg.vmmap.find(child)
167 if child_page is not None:
168 if page_name is not None and page_name not in child_page.objfile:
169 continue
170 line = (
171 get_rec_addr_string(child, visited_map)
172 + M.get(child)
173 + " "
174 + M.get(child, text=child_page.objfile)
175 )
176 chain_length = line.count(arrow_right)
177 if chain_length in output_map:
178 output_map[chain_length].append(line)
179 else:
180 output_map[chain_length] = [line]
181
182 # Output sorted by length of chain
183 for chain_length in output_map:
184 for line in output_map[chain_length]:
185 print(line)
186
187 if pwndbg.gdblib.qemu.is_qemu():
188 print("\n[QEMU target detected - leakfind result might not be accurate; see `help vmmap`]")
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/pwndbg/commands/leakfind.py b/pwndbg/commands/leakfind.py
--- a/pwndbg/commands/leakfind.py
+++ b/pwndbg/commands/leakfind.py
@@ -60,7 +60,9 @@
and are a maximum length of 6.
"""
parser.formatter_class = argparse.RawDescriptionHelpFormatter
-parser.add_argument("address", help="Starting address to find a leak chain from")
+parser.add_argument(
+ "address", nargs="?", default="$sp", help="Starting address to find a leak chain from"
+)
parser.add_argument(
"-p",
"--page_name",
| {"golden_diff": "diff --git a/pwndbg/commands/leakfind.py b/pwndbg/commands/leakfind.py\n--- a/pwndbg/commands/leakfind.py\n+++ b/pwndbg/commands/leakfind.py\n@@ -60,7 +60,9 @@\n and are a maximum length of 6.\n \"\"\"\n parser.formatter_class = argparse.RawDescriptionHelpFormatter\n-parser.add_argument(\"address\", help=\"Starting address to find a leak chain from\")\n+parser.add_argument(\n+ \"address\", nargs=\"?\", default=\"$sp\", help=\"Starting address to find a leak chain from\"\n+)\n parser.add_argument(\n \"-p\",\n \"--page_name\",\n", "issue": "`leakfind` should default to `$sp`\nThe first argument to `leakfind` is required, but it should just default to `$sp` like `probeleak` does.\n", "before_files": [{"content": "\"\"\"\nFind a chain of leaks given some starting address.\n\"\"\"\n\nimport argparse\nimport queue\n\nimport gdb\n\nimport pwndbg.color.chain as C\nimport pwndbg.color.memory as M\nimport pwndbg.color.message as message\nimport pwndbg.commands\nimport pwndbg.vmmap\nfrom pwndbg.chain import config_arrow_right\n\n\n# Used to recursively print the pointer chain.\n# addr is a pointer. It is taken to be a child pointer.\n# visited_map is a map of children -> (parent,parent_start)\ndef get_rec_addr_string(addr, visited_map):\n page = pwndbg.vmmap.find(addr)\n arrow_right = C.arrow(\" %s \" % config_arrow_right)\n\n if page is not None:\n if addr not in visited_map:\n return \"\"\n\n parent_info = visited_map[addr]\n parent = parent_info[0]\n parent_base_addr = parent_info[1]\n if parent - parent_base_addr < 0:\n curText = hex(parent_base_addr) + hex(parent - parent_base_addr)\n else:\n curText = hex(parent_base_addr) + \"+\" + hex(parent - parent_base_addr)\n if parent_base_addr == addr:\n return \"\"\n return (\n get_rec_addr_string(parent_base_addr, visited_map)\n + M.get(parent_base_addr, text=curText)\n + arrow_right\n )\n else:\n return \"\"\n\n\n# Useful for debugging. Prints a map of child -> (parent, parent_start)\ndef dbg_print_map(maps):\n for child, parent_info in maps.items():\n print(\"0x%x + (0x%x, 0x%x)\" % (child, parent_info[0], parent_info[1]))\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"\"\"\nAttempt to find a leak chain given a starting address.\nScans memory near the given address, looks for pointers, and continues that process to attempt to find leaks.\n\nExample: leakfind $rsp --page_name=filename --max_offset=0x48 --max_depth=6. This would look for any chains of leaks \\\nthat point to a section in filename which begin near $rsp, are never 0x48 bytes further from a known pointer, \\\nand are a maximum length of 6.\n\"\"\"\nparser.formatter_class = argparse.RawDescriptionHelpFormatter\nparser.add_argument(\"address\", help=\"Starting address to find a leak chain from\")\nparser.add_argument(\n \"-p\",\n \"--page_name\",\n type=str,\n nargs=\"?\",\n default=None,\n help=\"Substring required to be part of the name of any found pages\",\n)\nparser.add_argument(\n \"-o\",\n \"--max_offset\",\n default=0x48,\n nargs=\"?\",\n help=\"Max offset to add to addresses when looking for leak\",\n)\nparser.add_argument(\n \"-d\", \"--max_depth\", default=0x4, nargs=\"?\", help=\"Maximum depth to follow pointers to\"\n)\nparser.add_argument(\n \"-s\",\n \"--step\",\n nargs=\"?\",\n default=0x1,\n help=\"Step to add between pointers so they are considered. 
For example, if this is 4 it would only consider pointers at an offset divisible by 4 from the starting pointer\",\n)\nparser.add_argument(\n \"--negative_offset\",\n nargs=\"?\",\n default=0x0,\n help=\"Max negative offset to search before an address when looking for a leak\",\n)\n\n\[email protected](parser)\[email protected]\ndef leakfind(\n address=None, page_name=None, max_offset=0x40, max_depth=0x4, step=0x1, negative_offset=0x0\n):\n if address is None:\n raise argparse.ArgumentTypeError(\"No starting address provided.\")\n foundPages = pwndbg.vmmap.find(address)\n\n if not foundPages:\n raise argparse.ArgumentTypeError(\"Starting address is not mapped.\")\n\n if not pwndbg.gdblib.memory.peek(address):\n raise argparse.ArgumentTypeError(\"Unable to read from starting address.\")\n\n max_depth = int(max_depth)\n # Just warn the user that a large depth might be slow.\n # Probably worth checking offset^depth < threshold. Do this when more benchmarking is established.\n if max_depth > 8:\n print(message.warn(\"leakfind may take a while to run on larger depths.\"))\n\n stride = int(step)\n address = int(address)\n max_offset = int(max_offset)\n negative_offset = int(negative_offset)\n\n # The below map stores a map of child address->(parent_address,parent_start_address)\n # In the above tuple, parent_address is the exact address with a pointer to the child address.\n # parent_start_address is an address that a previous address pointed to.\n # We need to store both so that we can nicely create our leak chain.\n visited_map = {}\n visited_set = {int(address)}\n address_queue = queue.Queue()\n address_queue.put(int(address))\n depth = 0\n time_to_depth_increase = 0\n\n # Run a bfs\n # TODO look into performance gain from checking if an address is mapped before calling pwndbg.gdblib.memory.pvoid()\n # TODO also check using pwndbg.gdblib.memory.read for possible performance boosts.\n while address_queue.qsize() > 0 and depth < max_depth:\n if time_to_depth_increase == 0:\n depth = depth + 1\n time_to_depth_increase = address_queue.qsize()\n cur_start_addr = address_queue.get()\n time_to_depth_increase -= 1\n for cur_addr in range(\n cur_start_addr - negative_offset, cur_start_addr + max_offset, stride\n ):\n try:\n cur_addr &= pwndbg.gdblib.arch.ptrmask\n result = int(pwndbg.gdblib.memory.pvoid(cur_addr))\n if result in visited_map or result in visited_set:\n continue\n visited_map[result] = (\n cur_addr,\n cur_start_addr,\n ) # map is of form child->(parent,parent_start)\n address_queue.put(result)\n visited_set.add(result)\n except gdb.error:\n # That means the memory was unmapped. Just skip it if we can't read it.\n break\n\n # A map of length->list of lines. 
Used to let us print in a somewhat nice manner.\n output_map = {}\n arrow_right = C.arrow(\" %s \" % config_arrow_right)\n\n for child in visited_map:\n child_page = pwndbg.vmmap.find(child)\n if child_page is not None:\n if page_name is not None and page_name not in child_page.objfile:\n continue\n line = (\n get_rec_addr_string(child, visited_map)\n + M.get(child)\n + \" \"\n + M.get(child, text=child_page.objfile)\n )\n chain_length = line.count(arrow_right)\n if chain_length in output_map:\n output_map[chain_length].append(line)\n else:\n output_map[chain_length] = [line]\n\n # Output sorted by length of chain\n for chain_length in output_map:\n for line in output_map[chain_length]:\n print(line)\n\n if pwndbg.gdblib.qemu.is_qemu():\n print(\"\\n[QEMU target detected - leakfind result might not be accurate; see `help vmmap`]\")\n", "path": "pwndbg/commands/leakfind.py"}], "after_files": [{"content": "\"\"\"\nFind a chain of leaks given some starting address.\n\"\"\"\n\nimport argparse\nimport queue\n\nimport gdb\n\nimport pwndbg.color.chain as C\nimport pwndbg.color.memory as M\nimport pwndbg.color.message as message\nimport pwndbg.commands\nimport pwndbg.vmmap\nfrom pwndbg.chain import config_arrow_right\n\n\n# Used to recursively print the pointer chain.\n# addr is a pointer. It is taken to be a child pointer.\n# visited_map is a map of children -> (parent,parent_start)\ndef get_rec_addr_string(addr, visited_map):\n page = pwndbg.vmmap.find(addr)\n arrow_right = C.arrow(\" %s \" % config_arrow_right)\n\n if page is not None:\n if addr not in visited_map:\n return \"\"\n\n parent_info = visited_map[addr]\n parent = parent_info[0]\n parent_base_addr = parent_info[1]\n if parent - parent_base_addr < 0:\n curText = hex(parent_base_addr) + hex(parent - parent_base_addr)\n else:\n curText = hex(parent_base_addr) + \"+\" + hex(parent - parent_base_addr)\n if parent_base_addr == addr:\n return \"\"\n return (\n get_rec_addr_string(parent_base_addr, visited_map)\n + M.get(parent_base_addr, text=curText)\n + arrow_right\n )\n else:\n return \"\"\n\n\n# Useful for debugging. Prints a map of child -> (parent, parent_start)\ndef dbg_print_map(maps):\n for child, parent_info in maps.items():\n print(\"0x%x + (0x%x, 0x%x)\" % (child, parent_info[0], parent_info[1]))\n\n\nparser = argparse.ArgumentParser()\nparser.description = \"\"\"\nAttempt to find a leak chain given a starting address.\nScans memory near the given address, looks for pointers, and continues that process to attempt to find leaks.\n\nExample: leakfind $rsp --page_name=filename --max_offset=0x48 --max_depth=6. 
This would look for any chains of leaks \\\nthat point to a section in filename which begin near $rsp, are never 0x48 bytes further from a known pointer, \\\nand are a maximum length of 6.\n\"\"\"\nparser.formatter_class = argparse.RawDescriptionHelpFormatter\nparser.add_argument(\n \"address\", nargs=\"?\", default=\"$sp\", help=\"Starting address to find a leak chain from\"\n)\nparser.add_argument(\n \"-p\",\n \"--page_name\",\n type=str,\n nargs=\"?\",\n default=None,\n help=\"Substring required to be part of the name of any found pages\",\n)\nparser.add_argument(\n \"-o\",\n \"--max_offset\",\n default=0x48,\n nargs=\"?\",\n help=\"Max offset to add to addresses when looking for leak\",\n)\nparser.add_argument(\n \"-d\", \"--max_depth\", default=0x4, nargs=\"?\", help=\"Maximum depth to follow pointers to\"\n)\nparser.add_argument(\n \"-s\",\n \"--step\",\n nargs=\"?\",\n default=0x1,\n help=\"Step to add between pointers so they are considered. For example, if this is 4 it would only consider pointers at an offset divisible by 4 from the starting pointer\",\n)\nparser.add_argument(\n \"--negative_offset\",\n nargs=\"?\",\n default=0x0,\n help=\"Max negative offset to search before an address when looking for a leak\",\n)\n\n\[email protected](parser)\[email protected]\ndef leakfind(\n address=None, page_name=None, max_offset=0x40, max_depth=0x4, step=0x1, negative_offset=0x0\n):\n if address is None:\n raise argparse.ArgumentTypeError(\"No starting address provided.\")\n foundPages = pwndbg.vmmap.find(address)\n\n if not foundPages:\n raise argparse.ArgumentTypeError(\"Starting address is not mapped.\")\n\n if not pwndbg.gdblib.memory.peek(address):\n raise argparse.ArgumentTypeError(\"Unable to read from starting address.\")\n\n max_depth = int(max_depth)\n # Just warn the user that a large depth might be slow.\n # Probably worth checking offset^depth < threshold. 
Do this when more benchmarking is established.\n if max_depth > 8:\n print(message.warn(\"leakfind may take a while to run on larger depths.\"))\n\n stride = int(step)\n address = int(address)\n max_offset = int(max_offset)\n negative_offset = int(negative_offset)\n\n # The below map stores a map of child address->(parent_address,parent_start_address)\n # In the above tuple, parent_address is the exact address with a pointer to the child address.\n # parent_start_address is an address that a previous address pointed to.\n # We need to store both so that we can nicely create our leak chain.\n visited_map = {}\n visited_set = {int(address)}\n address_queue = queue.Queue()\n address_queue.put(int(address))\n depth = 0\n time_to_depth_increase = 0\n\n # Run a bfs\n # TODO look into performance gain from checking if an address is mapped before calling pwndbg.gdblib.memory.pvoid()\n # TODO also check using pwndbg.gdblib.memory.read for possible performance boosts.\n while address_queue.qsize() > 0 and depth < max_depth:\n if time_to_depth_increase == 0:\n depth = depth + 1\n time_to_depth_increase = address_queue.qsize()\n cur_start_addr = address_queue.get()\n time_to_depth_increase -= 1\n for cur_addr in range(\n cur_start_addr - negative_offset, cur_start_addr + max_offset, stride\n ):\n try:\n cur_addr &= pwndbg.gdblib.arch.ptrmask\n result = int(pwndbg.gdblib.memory.pvoid(cur_addr))\n if result in visited_map or result in visited_set:\n continue\n visited_map[result] = (\n cur_addr,\n cur_start_addr,\n ) # map is of form child->(parent,parent_start)\n address_queue.put(result)\n visited_set.add(result)\n except gdb.error:\n # That means the memory was unmapped. Just skip it if we can't read it.\n break\n\n # A map of length->list of lines. Used to let us print in a somewhat nice manner.\n output_map = {}\n arrow_right = C.arrow(\" %s \" % config_arrow_right)\n\n for child in visited_map:\n child_page = pwndbg.vmmap.find(child)\n if child_page is not None:\n if page_name is not None and page_name not in child_page.objfile:\n continue\n line = (\n get_rec_addr_string(child, visited_map)\n + M.get(child)\n + \" \"\n + M.get(child, text=child_page.objfile)\n )\n chain_length = line.count(arrow_right)\n if chain_length in output_map:\n output_map[chain_length].append(line)\n else:\n output_map[chain_length] = [line]\n\n # Output sorted by length of chain\n for chain_length in output_map:\n for line in output_map[chain_length]:\n print(line)\n\n if pwndbg.gdblib.qemu.is_qemu():\n print(\"\\n[QEMU target detected - leakfind result might not be accurate; see `help vmmap`]\")\n", "path": "pwndbg/commands/leakfind.py"}]} | 2,344 | 145 |
gh_patches_debug_9875 | rasdani/github-patches | git_diff | mosaicml__composer-592 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Require `split_batch_fn` only for `grad_accum > 1`
For easy out-of-the-box use with custom datatypes, we should only require `split_batch_fn` if `grad_accum > 1`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `composer/core/data_spec.py`
Content:
```
1 # Copyright 2021 MosaicML. All Rights Reserved.
2
3 """Specifications for operating and training on data."""
4 from __future__ import annotations
5
6 import collections.abc
7 import textwrap
8 from typing import TYPE_CHECKING, Callable, List, Optional, Sequence
9
10 import torch
11
12 from composer.utils.iter_helpers import ensure_tuple
13
14 if TYPE_CHECKING:
15 from composer.core.types import Batch, DataLoader
16
17 __all__ = ["DataSpec"]
18
19
20 class DataSpec:
21 """Specifications for operating and training on data.
22
23 An example of constructing a :class:`DataSpec` object with a ``device_transforms`` callable
24 (:class:`~composer.datasets.utils.NormalizationFn`) and then using it with :class:`~.Trainer`:
25
26 >>> # In this case, we apply NormalizationFn
27 >>> # Construct DataSpec as shown below to apply this transformation
28 >>> from composer.datasets.utils import NormalizationFn
29 >>> CHANNEL_MEAN = (0.485 * 255, 0.456 * 255, 0.406 * 255)
30 >>> CHANNEL_STD = (0.229 * 255, 0.224 * 255, 0.225 * 255)
31 >>> device_transform_fn = NormalizationFn(mean=CHANNEL_MEAN, std=CHANNEL_STD)
32 >>> train_dspec = DataSpec(train_dataloader, device_transforms=device_transform_fn)
33 >>> # The same function can be used for eval dataloader as well
34 >>> eval_dspec = DataSpec(eval_dataloader, device_transforms=device_transform_fn)
35 >>> # Use this DataSpec object to construct trainer
36 >>> trainer = Trainer(
37 ... model=model,
38 ... train_dataloader=train_dspec,
39 ... eval_dataloader=eval_dspec,
40 ... optimizers=optimizer,
41 ... max_duration="1ep",
42 ... )
43
44 Args:
45 dataloader (DataLoader): The dataloader.
46
47 num_samples (int, optional): The total number of samples in an epoch, across all ranks. This field is used by
48 the :class:`~.time.Timer` (training progress tracker). If not specified, then ``len(dataloader.dataset)`` is
49 used (if this property is available). Otherwise, the dataset is assumed to be unsized.
50
51 num_tokens (int, optional): The total number of tokens in an epoch. This field is used by the
52 :class:`~.time.Timer` (training progress tracker).
53
54 device_transforms ((Batch) -> Batch, optional): Function called by the :class:`~.trainer.Trainer` to modify the
55 batch once it has been moved onto the device. For example, this function can be used for GPU-based
56 normalization. It can modify the batch in-place, and it should return the modified batch. If not specified, the
57 batch is not modified.
58
59 split_batch ((Batch, int) -> Sequence[Batch], optional): Function called by the :class:`~.trainer.Trainer` to
60 split a batch (the first parameter) into the number of microbatches specified (the second parameter). By
61 default, batches of type :attr:`~.types.BatchPair` can be split automatically. If the ``dataloader`` yields
62 batches of a different type, then this function must be specified.
63
64 get_num_samples_in_batch ((Batch) -> int, optional): Function that is called by the :class:`~.trainer.Trainer`
65 to get the number of samples in the provided batch.
66
67 By default, if the batch contains tensors that all have the same 0th dim, then the value of the 0th dim will
68 be returned. If the batch contains tensors where the 0th dim differ, then this function must be specified.
69
70 get_num_tokens_in_batch ((Batch) -> int, optional): Function that is called by the :class:`~.trainer.Trainer` to
71 get the number of tokens in the provided batch.
72
73 By default, it returns 0, meaning that number of tokens processed will not be tracked as a part of the
74 training progress tracking. This function must be specified to track the number of tokens processed during
75 training.
76 """
77
78 def __init__(
79 self,
80 dataloader: DataLoader,
81 num_samples: Optional[int] = None,
82 num_tokens: Optional[int] = None,
83 device_transforms: Optional[Callable[[Batch], Batch]] = None,
84 split_batch: Optional[Callable[[Batch, int], Sequence[Batch]]] = None,
85 get_num_samples_in_batch: Optional[Callable[[Batch], int]] = None,
86 get_num_tokens_in_batch: Optional[Callable[[Batch], int]] = None,
87 ) -> None:
88 self.dataloader = dataloader
89 self.num_tokens = num_tokens
90 self.device_transforms = self._default_device_transforms if device_transforms is None else device_transforms
91 self.split_batch = self._default_split_batch if split_batch is None else split_batch
92 self.get_num_samples_in_batch = self._default_get_num_samples_in_batch if get_num_samples_in_batch is None else get_num_samples_in_batch
93 self.get_num_tokens_in_batch = self._default_get_num_tokens_in_batch if get_num_tokens_in_batch is None else get_num_tokens_in_batch
94 if num_samples is not None:
95 self.num_samples = num_samples
96
97 else:
98 if isinstance(dataloader.dataset, collections.abc.Sized):
99 try:
100 self.num_samples = len(dataloader.dataset)
101 except (TypeError, NotImplementedError):
102 self.num_samples = None
103 else:
104 self.num_samples = None
105
106 def _default_device_transforms(self, batch: Batch):
107 return batch
108
109 def _default_split_batch(self, batch: Batch, num_microbatches: int) -> Sequence[Batch]:
110 if not isinstance(batch, Sequence):
111 raise ValueError(f'split_fn requires batch be a tuple pair of tensors, got {type(batch)}')
112 x, y = batch
113 if isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
114 return list(zip(x.chunk(num_microbatches), y.chunk(num_microbatches)))
115 if isinstance(x, List) and isinstance(y, List):
116 return list(
117 zip(
118 [x[i::num_microbatches] for i in range(num_microbatches)],
119 [y[i::num_microbatches] for i in range(num_microbatches)],
120 ))
121 raise NotImplementedError(
122 textwrap.dedent("""\
123 The default split_fn is unable to split the output of this
124 dataloader. Please use a DataSpec and specify `split_batch`."""))
125
126 def _default_get_num_samples_in_batch(self, batch: Batch) -> int:
127 if isinstance(batch, torch.Tensor):
128 return batch.shape[0]
129
130 dim0_sizes = []
131 if isinstance(batch, (list, tuple)):
132 for tensors in batch:
133 for t in ensure_tuple(tensors):
134 dim0_sizes.append(t.shape[0])
135 elif isinstance(batch, dict):
136 dim0_sizes = [t.shape[0] for t in batch.values()]
137
138 if len(set(dim0_sizes)) == 1:
139 return dim0_sizes[0]
140 else:
141 raise NotImplementedError(
142 textwrap.dedent(f"""\
143 Cannot determine the batch size, as multiple Tensors of
144 different lengths were found in the batch: sizes in batch: {dim0_sizes}.
145 Please use a DataSpec and specify `get_num_samples_in_batch`."""))
146
147 def _default_get_num_tokens_in_batch(self, batch: Batch) -> int:
148 del batch # unused
149 return 0
150
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/composer/core/data_spec.py b/composer/core/data_spec.py
--- a/composer/core/data_spec.py
+++ b/composer/core/data_spec.py
@@ -107,6 +107,10 @@
return batch
def _default_split_batch(self, batch: Batch, num_microbatches: int) -> Sequence[Batch]:
+ if num_microbatches < 1:
+ raise ValueError("num_microbatches must be at least 1")
+ if num_microbatches == 1:
+ return [batch]
if not isinstance(batch, Sequence):
raise ValueError(f'split_fn requires batch be a tuple pair of tensors, got {type(batch)}')
x, y = batch
| {"golden_diff": "diff --git a/composer/core/data_spec.py b/composer/core/data_spec.py\n--- a/composer/core/data_spec.py\n+++ b/composer/core/data_spec.py\n@@ -107,6 +107,10 @@\n return batch\n \n def _default_split_batch(self, batch: Batch, num_microbatches: int) -> Sequence[Batch]:\n+ if num_microbatches < 1:\n+ raise ValueError(\"num_microbatches must be at least 1\")\n+ if num_microbatches == 1:\n+ return [batch]\n if not isinstance(batch, Sequence):\n raise ValueError(f'split_fn requires batch be a tuple pair of tensors, got {type(batch)}')\n x, y = batch\n", "issue": "Require `split_batch_fn` only for `grad_accum > 1`\nFor easy out-of-the-box use with custom datatypes, we should only require `split_batch_fn` if `grad_accum > 1`\n", "before_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\n\"\"\"Specifications for operating and training on data.\"\"\"\nfrom __future__ import annotations\n\nimport collections.abc\nimport textwrap\nfrom typing import TYPE_CHECKING, Callable, List, Optional, Sequence\n\nimport torch\n\nfrom composer.utils.iter_helpers import ensure_tuple\n\nif TYPE_CHECKING:\n from composer.core.types import Batch, DataLoader\n\n__all__ = [\"DataSpec\"]\n\n\nclass DataSpec:\n \"\"\"Specifications for operating and training on data.\n\n An example of constructing a :class:`DataSpec` object with a ``device_transforms`` callable\n (:class:`~composer.datasets.utils.NormalizationFn`) and then using it with :class:`~.Trainer`:\n\n >>> # In this case, we apply NormalizationFn \n >>> # Construct DataSpec as shown below to apply this transformation\n >>> from composer.datasets.utils import NormalizationFn\n >>> CHANNEL_MEAN = (0.485 * 255, 0.456 * 255, 0.406 * 255)\n >>> CHANNEL_STD = (0.229 * 255, 0.224 * 255, 0.225 * 255)\n >>> device_transform_fn = NormalizationFn(mean=CHANNEL_MEAN, std=CHANNEL_STD)\n >>> train_dspec = DataSpec(train_dataloader, device_transforms=device_transform_fn)\n >>> # The same function can be used for eval dataloader as well\n >>> eval_dspec = DataSpec(eval_dataloader, device_transforms=device_transform_fn)\n >>> # Use this DataSpec object to construct trainer\n >>> trainer = Trainer(\n ... model=model,\n ... train_dataloader=train_dspec,\n ... eval_dataloader=eval_dspec,\n ... optimizers=optimizer,\n ... max_duration=\"1ep\",\n ... )\n\n Args:\n dataloader (DataLoader): The dataloader.\n\n num_samples (int, optional): The total number of samples in an epoch, across all ranks. This field is used by\n the :class:`~.time.Timer` (training progress tracker). If not specified, then ``len(dataloader.dataset)`` is\n used (if this property is available). Otherwise, the dataset is assumed to be unsized.\n\n num_tokens (int, optional): The total number of tokens in an epoch. This field is used by the\n :class:`~.time.Timer` (training progress tracker).\n\n device_transforms ((Batch) -> Batch, optional): Function called by the :class:`~.trainer.Trainer` to modify the\n batch once it has been moved onto the device. For example, this function can be used for GPU-based\n normalization. It can modify the batch in-place, and it should return the modified batch. If not specified, the\n batch is not modified.\n\n split_batch ((Batch, int) -> Sequence[Batch], optional): Function called by the :class:`~.trainer.Trainer` to\n split a batch (the first parameter) into the number of microbatches specified (the second parameter). By\n default, batches of type :attr:`~.types.BatchPair` can be split automatically. 
If the ``dataloader`` yields\n batches of a different type, then this function must be specified.\n\n get_num_samples_in_batch ((Batch) -> int, optional): Function that is called by the :class:`~.trainer.Trainer`\n to get the number of samples in the provided batch.\n\n By default, if the batch contains tensors that all have the same 0th dim, then the value of the 0th dim will\n be returned. If the batch contains tensors where the 0th dim differ, then this function must be specified.\n\n get_num_tokens_in_batch ((Batch) -> int, optional): Function that is called by the :class:`~.trainer.Trainer` to\n get the number of tokens in the provided batch.\n\n By default, it returns 0, meaning that number of tokens processed will not be tracked as a part of the\n training progress tracking. This function must be specified to track the number of tokens processed during\n training.\n \"\"\"\n\n def __init__(\n self,\n dataloader: DataLoader,\n num_samples: Optional[int] = None,\n num_tokens: Optional[int] = None,\n device_transforms: Optional[Callable[[Batch], Batch]] = None,\n split_batch: Optional[Callable[[Batch, int], Sequence[Batch]]] = None,\n get_num_samples_in_batch: Optional[Callable[[Batch], int]] = None,\n get_num_tokens_in_batch: Optional[Callable[[Batch], int]] = None,\n ) -> None:\n self.dataloader = dataloader\n self.num_tokens = num_tokens\n self.device_transforms = self._default_device_transforms if device_transforms is None else device_transforms\n self.split_batch = self._default_split_batch if split_batch is None else split_batch\n self.get_num_samples_in_batch = self._default_get_num_samples_in_batch if get_num_samples_in_batch is None else get_num_samples_in_batch\n self.get_num_tokens_in_batch = self._default_get_num_tokens_in_batch if get_num_tokens_in_batch is None else get_num_tokens_in_batch\n if num_samples is not None:\n self.num_samples = num_samples\n\n else:\n if isinstance(dataloader.dataset, collections.abc.Sized):\n try:\n self.num_samples = len(dataloader.dataset)\n except (TypeError, NotImplementedError):\n self.num_samples = None\n else:\n self.num_samples = None\n\n def _default_device_transforms(self, batch: Batch):\n return batch\n\n def _default_split_batch(self, batch: Batch, num_microbatches: int) -> Sequence[Batch]:\n if not isinstance(batch, Sequence):\n raise ValueError(f'split_fn requires batch be a tuple pair of tensors, got {type(batch)}')\n x, y = batch\n if isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):\n return list(zip(x.chunk(num_microbatches), y.chunk(num_microbatches)))\n if isinstance(x, List) and isinstance(y, List):\n return list(\n zip(\n [x[i::num_microbatches] for i in range(num_microbatches)],\n [y[i::num_microbatches] for i in range(num_microbatches)],\n ))\n raise NotImplementedError(\n textwrap.dedent(\"\"\"\\\n The default split_fn is unable to split the output of this\n dataloader. 
Please use a DataSpec and specify `split_batch`.\"\"\"))\n\n def _default_get_num_samples_in_batch(self, batch: Batch) -> int:\n if isinstance(batch, torch.Tensor):\n return batch.shape[0]\n\n dim0_sizes = []\n if isinstance(batch, (list, tuple)):\n for tensors in batch:\n for t in ensure_tuple(tensors):\n dim0_sizes.append(t.shape[0])\n elif isinstance(batch, dict):\n dim0_sizes = [t.shape[0] for t in batch.values()]\n\n if len(set(dim0_sizes)) == 1:\n return dim0_sizes[0]\n else:\n raise NotImplementedError(\n textwrap.dedent(f\"\"\"\\\n Cannot determine the batch size, as multiple Tensors of\n different lengths were found in the batch: sizes in batch: {dim0_sizes}.\n Please use a DataSpec and specify `get_num_samples_in_batch`.\"\"\"))\n\n def _default_get_num_tokens_in_batch(self, batch: Batch) -> int:\n del batch # unused\n return 0\n", "path": "composer/core/data_spec.py"}], "after_files": [{"content": "# Copyright 2021 MosaicML. All Rights Reserved.\n\n\"\"\"Specifications for operating and training on data.\"\"\"\nfrom __future__ import annotations\n\nimport collections.abc\nimport textwrap\nfrom typing import TYPE_CHECKING, Callable, List, Optional, Sequence\n\nimport torch\n\nfrom composer.utils.iter_helpers import ensure_tuple\n\nif TYPE_CHECKING:\n from composer.core.types import Batch, DataLoader\n\n__all__ = [\"DataSpec\"]\n\n\nclass DataSpec:\n \"\"\"Specifications for operating and training on data.\n\n An example of constructing a :class:`DataSpec` object with a ``device_transforms`` callable\n (:class:`~composer.datasets.utils.NormalizationFn`) and then using it with :class:`~.Trainer`:\n\n >>> # In this case, we apply NormalizationFn \n >>> # Construct DataSpec as shown below to apply this transformation\n >>> from composer.datasets.utils import NormalizationFn\n >>> CHANNEL_MEAN = (0.485 * 255, 0.456 * 255, 0.406 * 255)\n >>> CHANNEL_STD = (0.229 * 255, 0.224 * 255, 0.225 * 255)\n >>> device_transform_fn = NormalizationFn(mean=CHANNEL_MEAN, std=CHANNEL_STD)\n >>> train_dspec = DataSpec(train_dataloader, device_transforms=device_transform_fn)\n >>> # The same function can be used for eval dataloader as well\n >>> eval_dspec = DataSpec(eval_dataloader, device_transforms=device_transform_fn)\n >>> # Use this DataSpec object to construct trainer\n >>> trainer = Trainer(\n ... model=model,\n ... train_dataloader=train_dspec,\n ... eval_dataloader=eval_dspec,\n ... optimizers=optimizer,\n ... max_duration=\"1ep\",\n ... )\n\n Args:\n dataloader (DataLoader): The dataloader.\n\n num_samples (int, optional): The total number of samples in an epoch, across all ranks. This field is used by\n the :class:`~.time.Timer` (training progress tracker). If not specified, then ``len(dataloader.dataset)`` is\n used (if this property is available). Otherwise, the dataset is assumed to be unsized.\n\n num_tokens (int, optional): The total number of tokens in an epoch. This field is used by the\n :class:`~.time.Timer` (training progress tracker).\n\n device_transforms ((Batch) -> Batch, optional): Function called by the :class:`~.trainer.Trainer` to modify the\n batch once it has been moved onto the device. For example, this function can be used for GPU-based\n normalization. It can modify the batch in-place, and it should return the modified batch. 
If not specified, the\n batch is not modified.\n\n split_batch ((Batch, int) -> Sequence[Batch], optional): Function called by the :class:`~.trainer.Trainer` to\n split a batch (the first parameter) into the number of microbatches specified (the second parameter). By\n default, batches of type :attr:`~.types.BatchPair` can be split automatically. If the ``dataloader`` yields\n batches of a different type, then this function must be specified.\n\n get_num_samples_in_batch ((Batch) -> int, optional): Function that is called by the :class:`~.trainer.Trainer`\n to get the number of samples in the provided batch.\n\n By default, if the batch contains tensors that all have the same 0th dim, then the value of the 0th dim will\n be returned. If the batch contains tensors where the 0th dim differ, then this function must be specified.\n\n get_num_tokens_in_batch ((Batch) -> int, optional): Function that is called by the :class:`~.trainer.Trainer` to\n get the number of tokens in the provided batch.\n\n By default, it returns 0, meaning that number of tokens processed will not be tracked as a part of the\n training progress tracking. This function must be specified to track the number of tokens processed during\n training.\n \"\"\"\n\n def __init__(\n self,\n dataloader: DataLoader,\n num_samples: Optional[int] = None,\n num_tokens: Optional[int] = None,\n device_transforms: Optional[Callable[[Batch], Batch]] = None,\n split_batch: Optional[Callable[[Batch, int], Sequence[Batch]]] = None,\n get_num_samples_in_batch: Optional[Callable[[Batch], int]] = None,\n get_num_tokens_in_batch: Optional[Callable[[Batch], int]] = None,\n ) -> None:\n self.dataloader = dataloader\n self.num_tokens = num_tokens\n self.device_transforms = self._default_device_transforms if device_transforms is None else device_transforms\n self.split_batch = self._default_split_batch if split_batch is None else split_batch\n self.get_num_samples_in_batch = self._default_get_num_samples_in_batch if get_num_samples_in_batch is None else get_num_samples_in_batch\n self.get_num_tokens_in_batch = self._default_get_num_tokens_in_batch if get_num_tokens_in_batch is None else get_num_tokens_in_batch\n if num_samples is not None:\n self.num_samples = num_samples\n\n else:\n if isinstance(dataloader.dataset, collections.abc.Sized):\n try:\n self.num_samples = len(dataloader.dataset)\n except (TypeError, NotImplementedError):\n self.num_samples = None\n else:\n self.num_samples = None\n\n def _default_device_transforms(self, batch: Batch):\n return batch\n\n def _default_split_batch(self, batch: Batch, num_microbatches: int) -> Sequence[Batch]:\n if num_microbatches < 1:\n raise ValueError(\"num_microbatches must be at least 1\")\n if num_microbatches == 1:\n return [batch]\n if not isinstance(batch, Sequence):\n raise ValueError(f'split_fn requires batch be a tuple pair of tensors, got {type(batch)}')\n x, y = batch\n if isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):\n return list(zip(x.chunk(num_microbatches), y.chunk(num_microbatches)))\n if isinstance(x, List) and isinstance(y, List):\n return list(\n zip(\n [x[i::num_microbatches] for i in range(num_microbatches)],\n [y[i::num_microbatches] for i in range(num_microbatches)],\n ))\n raise NotImplementedError(\n textwrap.dedent(\"\"\"\\\n The default split_fn is unable to split the output of this\n dataloader. 
Please use a DataSpec and specify `split_batch`.\"\"\"))\n\n def _default_get_num_samples_in_batch(self, batch: Batch) -> int:\n if isinstance(batch, torch.Tensor):\n return batch.shape[0]\n\n dim0_sizes = []\n if isinstance(batch, (list, tuple)):\n for tensors in batch:\n for t in ensure_tuple(tensors):\n dim0_sizes.append(t.shape[0])\n elif isinstance(batch, dict):\n dim0_sizes = [t.shape[0] for t in batch.values()]\n\n if len(set(dim0_sizes)) == 1:\n return dim0_sizes[0]\n else:\n raise NotImplementedError(\n textwrap.dedent(f\"\"\"\\\n Cannot determine the batch size, as multiple Tensors of\n different lengths were found in the batch: sizes in batch: {dim0_sizes}.\n Please use a DataSpec and specify `get_num_samples_in_batch`.\"\"\"))\n\n def _default_get_num_tokens_in_batch(self, batch: Batch) -> int:\n del batch # unused\n return 0\n", "path": "composer/core/data_spec.py"}]} | 2,302 | 160 |
gh_patches_debug_11514 | rasdani/github-patches | git_diff | plone__Products.CMFPlone-2915 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The TinyMCE pattern loads HTML as CSS and slows edit forms
## The TinyMCE pattern loads HTML as CSS and slows edit forms
### What I did:
Open an `@@edit` form with the browser's network inspector open.
### What I expect to happen:
It should load quickly and with the correct resources.
### What actually happened:
For every field using the TinyMCE pattern, there was a network request for the portal root rendered as HTML but loaded as CSS. This takes a lot of time and greatly slows the page load time.
### What version of Plone/ Addons I am using:
Products.CMFPlone==5.1.5
plone.app.theming==2.0.5
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `Products/CMFPlone/patterns/tinymce.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from lxml import html
3 from plone.app.layout.navigation.root import getNavigationRootObject
4 from plone.app.theming.utils import theming_policy
5 from plone.registry.interfaces import IRegistry
6 from Products.CMFCore.utils import getToolByName
7 from Products.CMFPlone.interfaces import IFilterSchema
8 from Products.CMFPlone.interfaces import ITinyMCESchema
9 from Products.CMFPlone.utils import get_portal
10 from zope.component import getUtility
11
12 import json
13
14
15 class TinyMCESettingsGenerator(object):
16
17 def __init__(self, context, request):
18 self.context = context
19 self.request = request
20 self.settings = getUtility(IRegistry).forInterface(
21 ITinyMCESchema,
22 prefix="plone",
23 check=False
24 )
25 self.filter_settings = getUtility(IRegistry).forInterface(
26 IFilterSchema,
27 prefix="plone",
28 check=False
29 )
30 self.nav_root = getNavigationRootObject(
31 self.context,
32 get_portal(),
33 )
34 self.nav_root_url = self.nav_root.absolute_url()
35
36 def get_theme(self):
37 return theming_policy().get_theme()
38
39 def get_content_css(self, style_css=''):
40 files = [
41 '{0}/++plone++static/plone-compiled.css'.format(self.nav_root_url)
42 ]
43 if style_css:
44 files.extend(style_css.split(','))
45 content_css = self.settings.content_css or []
46 for url in content_css:
47 if url and url.strip():
48 files.append('/'.join([self.nav_root_url, url.strip()]))
49 theme = self.get_theme()
50 tinymce_content_css = getattr(theme, 'tinymce_content_css', None)
51 if tinymce_content_css is not None:
52 for path in theme.tinymce_content_css.split(','):
53 if path.startswith('http://') or path.startswith('https://'):
54 files.append(path)
55 else:
56 files.append(self.nav_root_url + path)
57
58 return ','.join(files)
59
60 def get_style_format(self, txt, _type='format', base=None):
61 parts = txt.strip().split('|')
62 if len(parts) < 2:
63 return
64 if base is None:
65 val = {}
66 else:
67 val = base.copy()
68 val.update({
69 'title': parts[0],
70 _type: parts[1]
71 })
72 if len(parts) > 2:
73 val['icon'] = parts[2]
74 return val
75
76 def get_styles(self, styles, _type='format', base=None):
77 result = []
78 for style in styles:
79 style = self.get_style_format(style, _type, base)
80 if not style:
81 continue
82 result.append(style)
83 return result
84
85 def get_all_style_formats(self):
86 header_styles = self.settings.header_styles or []
87 block_styles = self.settings.block_styles or []
88 inline_styles = self.settings.inline_styles or []
89 alignment_styles = self.settings.alignment_styles or []
90 table_styles = self.settings.table_styles or []
91 return [{
92 'title': 'Headers',
93 'items': self.get_styles(header_styles)
94 }, {
95 'title': 'Block',
96 'items': self.get_styles(block_styles)
97 }, {
98 'title': 'Inline',
99 'items': self.get_styles(inline_styles)
100 }, {
101 'title': 'Alignment',
102 'items': self.get_styles(alignment_styles)
103 }, {
104 'title': 'Tables',
105 'items': self.get_styles(
106 table_styles, 'classes', {'selector': 'table'})
107 }]
108
109 def get_tiny_config(self):
110 settings = self.settings
111 importcss_file_filter = '%s/++plone++static/tinymce-styles.css' % (
112 self.nav_root_url
113 )
114
115 theme = self.get_theme()
116 if theme and getattr(theme, 'tinymce_styles_css', None):
117 importcss_file_filter += ',%s/%s' % (
118 self.nav_root_url,
119 theme.tinymce_styles_css.lstrip('/'))
120
121 tiny_config = {
122 'resize': 'both' if settings.resizing else False,
123 'content_css': self.get_content_css(importcss_file_filter),
124 'plugins': [
125 'plonelink',
126 'ploneimage',
127 'importcss'
128 ] + settings.plugins,
129 'external_plugins': {},
130 'toolbar': settings.toolbar,
131 'entity_encoding': settings.entity_encoding,
132 'importcss_append': True,
133 'importcss_file_filter': importcss_file_filter,
134 'browser_spellcheck': True
135 }
136 toolbar_additions = settings.custom_buttons or []
137
138 if settings.editor_height:
139 tiny_config['height'] = settings.editor_height
140 if settings.autoresize:
141 tiny_config['plugins'].append('autoresize')
142 tiny_config['autoresize_max_height'] = 1000 # hard coded?
143 if settings.editor_width:
144 tiny_config['width'] = settings.editor_width
145
146 # specific plugin options
147 if 'contextmenu' in settings.plugins:
148 tiny_config['contextmenu'] = "plonelink ploneimage inserttable |"\
149 " cell row column deletetable"
150
151 if settings.libraries_spellchecker_choice == 'AtD':
152 mtool = getToolByName(self.context, 'portal_membership')
153 member = mtool.getAuthenticatedMember()
154 member_id = member.getId()
155 if member_id:
156 if 'compat3x' not in tiny_config['plugins']:
157 tiny_config['plugins'].append('compat3x')
158 tiny_config['external_plugins']['AtD'] = (
159 '{0}/++plone++static/tinymce-AtD-plugin/'
160 'editor_plugin.js'.format(self.nav_root_url)
161 )
162 # None when Anonymous User
163 tiny_config['atd_rpc_id'] = 'plone-' + member_id
164 tiny_config['atd_rpc_url'] = self.nav_root_url
165 tiny_config['atd_show_types'] = ','.join(
166 settings.libraries_atd_show_types
167 )
168 tiny_config['atd_ignore_strings'] = ','.join(
169 settings.libraries_atd_ignore_strings
170 )
171 toolbar_additions.append('AtD')
172 elif settings.libraries_spellchecker_choice == 'AtD':
173 tiny_config['browser_spellcheck'] = True
174
175 if toolbar_additions:
176 tiny_config['toolbar'] += ' | {0}'.format(
177 ' '.join(toolbar_additions)
178 )
179
180 for plugin in settings.custom_plugins or []:
181 parts = plugin.split('|')
182 if len(parts) != 2:
183 continue
184 tiny_config['external_plugins'][parts[0]] = parts[1]
185
186 tiny_config['style_formats'] = self.get_all_style_formats()
187 if settings.formats:
188 try:
189 tiny_config['formats'] = json.loads(settings.formats)
190 except ValueError:
191 pass
192
193 if settings.menubar:
194 tiny_config['menubar'] = settings.menubar
195 if settings.menu:
196 try:
197 tiny_config['menu'] = json.loads(settings.menu)
198 except ValueError:
199 pass
200
201 if hasattr(settings, 'templates') and settings.templates:
202 try:
203 tiny_config['templates'] = json.loads(settings.templates)
204 except ValueError:
205 pass
206
207 # add safe_html settings, which are useed in backend for filtering:
208 if not self.filter_settings.disable_filtering:
209 valid_tags = self.filter_settings.valid_tags
210 nasty_tags = self.filter_settings.nasty_tags
211 custom_attributes = self.filter_settings.custom_attributes
212 safe_attributes = [attr.decode() for attr in html.defs.safe_attrs]
213 valid_attributes = safe_attributes + custom_attributes
214 # valid_elements : 'a[href|target=_blank],strong/b,div[align],br'
215 tiny_valid_elements = []
216 for tag in valid_tags:
217 tag_str = "%s[%s]" % (tag, "|".join(valid_attributes))
218 tiny_valid_elements.append(tag_str)
219 # We want to remove the nasty tag including the content in the
220 # backend, so TinyMCE should allow them here.
221 for tag in nasty_tags:
222 tag_str = "%s[%s]" % (tag, "|".join(valid_attributes))
223 tiny_valid_elements.append(tag_str)
224 tiny_config['valid_elements'] = ",".join(tiny_valid_elements)
225
226 if settings.other_settings:
227 try:
228 tiny_config.update(json.loads(settings.other_settings))
229 except ValueError:
230 pass
231
232 return tiny_config
233
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/Products/CMFPlone/patterns/tinymce.py b/Products/CMFPlone/patterns/tinymce.py
--- a/Products/CMFPlone/patterns/tinymce.py
+++ b/Products/CMFPlone/patterns/tinymce.py
@@ -48,7 +48,7 @@
files.append('/'.join([self.nav_root_url, url.strip()]))
theme = self.get_theme()
tinymce_content_css = getattr(theme, 'tinymce_content_css', None)
- if tinymce_content_css is not None:
+ if tinymce_content_css:
for path in theme.tinymce_content_css.split(','):
if path.startswith('http://') or path.startswith('https://'):
files.append(path)
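
The one-line change above works because an empty string is not `None` but is falsy: with the old `is not None` check, a theme whose `tinymce_content_css` attribute is present but empty still enters the loop, `''.split(',')` yields `['']`, and the navigation-root URL itself gets appended to the CSS list — exactly the "portal root rendered as HTML but loaded as CSS" request described in the issue. A minimal standalone sketch of the difference (plain Python, no Plone imports; the values are made up for illustration):

```python
nav_root_url = "https://example-plone-site"   # hypothetical navigation root
tinymce_content_css = ""                      # theme attribute exists but is empty

def collect(check):
    files = []
    if check(tinymce_content_css):
        for path in tinymce_content_css.split(","):  # ''.split(',') -> ['']
            files.append(nav_root_url + path)        # appends the portal root itself
    return files

print(collect(lambda v: v is not None))  # ['https://example-plone-site']  (old check: bogus CSS entry)
print(collect(bool))                     # []                              (patched truthiness check)
```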
| {"golden_diff": "diff --git a/Products/CMFPlone/patterns/tinymce.py b/Products/CMFPlone/patterns/tinymce.py\n--- a/Products/CMFPlone/patterns/tinymce.py\n+++ b/Products/CMFPlone/patterns/tinymce.py\n@@ -48,7 +48,7 @@\n files.append('/'.join([self.nav_root_url, url.strip()]))\n theme = self.get_theme()\n tinymce_content_css = getattr(theme, 'tinymce_content_css', None)\n- if tinymce_content_css is not None:\n+ if tinymce_content_css:\n for path in theme.tinymce_content_css.split(','):\n if path.startswith('http://') or path.startswith('https://'):\n files.append(path)\n", "issue": "The TinyMCE pattern loads HTML as CSS and slows edit forms\n## The TinyMCE pattern loads HTML as CSS and slows edit forms\r\n\r\n### What I did:\r\n\r\nOpen an `@@edit` form with the browser's network inspector open.\r\n\r\n### What I expect to happen:\r\n\r\nIt should load quickly and with the correct resources.\r\n\r\n### What actually happened:\r\n\r\nFor every field using the TinyMCE pattern, there was a network request for the portal root rendered at HTML but loaded as CSS. This takes a lot of time and greatly slows the page load time.\r\n\r\n### What version of Plone/ Addons I am using:\r\n\r\nProducts.CMFPlone==5.1.5\r\nplone.app.theming==2.0.5\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom lxml import html\nfrom plone.app.layout.navigation.root import getNavigationRootObject\nfrom plone.app.theming.utils import theming_policy\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.interfaces import IFilterSchema\nfrom Products.CMFPlone.interfaces import ITinyMCESchema\nfrom Products.CMFPlone.utils import get_portal\nfrom zope.component import getUtility\n\nimport json\n\n\nclass TinyMCESettingsGenerator(object):\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n self.settings = getUtility(IRegistry).forInterface(\n ITinyMCESchema,\n prefix=\"plone\",\n check=False\n )\n self.filter_settings = getUtility(IRegistry).forInterface(\n IFilterSchema,\n prefix=\"plone\",\n check=False\n )\n self.nav_root = getNavigationRootObject(\n self.context,\n get_portal(),\n )\n self.nav_root_url = self.nav_root.absolute_url()\n\n def get_theme(self):\n return theming_policy().get_theme()\n\n def get_content_css(self, style_css=''):\n files = [\n '{0}/++plone++static/plone-compiled.css'.format(self.nav_root_url)\n ]\n if style_css:\n files.extend(style_css.split(','))\n content_css = self.settings.content_css or []\n for url in content_css:\n if url and url.strip():\n files.append('/'.join([self.nav_root_url, url.strip()]))\n theme = self.get_theme()\n tinymce_content_css = getattr(theme, 'tinymce_content_css', None)\n if tinymce_content_css is not None:\n for path in theme.tinymce_content_css.split(','):\n if path.startswith('http://') or path.startswith('https://'):\n files.append(path)\n else:\n files.append(self.nav_root_url + path)\n\n return ','.join(files)\n\n def get_style_format(self, txt, _type='format', base=None):\n parts = txt.strip().split('|')\n if len(parts) < 2:\n return\n if base is None:\n val = {}\n else:\n val = base.copy()\n val.update({\n 'title': parts[0],\n _type: parts[1]\n })\n if len(parts) > 2:\n val['icon'] = parts[2]\n return val\n\n def get_styles(self, styles, _type='format', base=None):\n result = []\n for style in styles:\n style = self.get_style_format(style, _type, base)\n if not style:\n continue\n result.append(style)\n return result\n\n def 
get_all_style_formats(self):\n header_styles = self.settings.header_styles or []\n block_styles = self.settings.block_styles or []\n inline_styles = self.settings.inline_styles or []\n alignment_styles = self.settings.alignment_styles or []\n table_styles = self.settings.table_styles or []\n return [{\n 'title': 'Headers',\n 'items': self.get_styles(header_styles)\n }, {\n 'title': 'Block',\n 'items': self.get_styles(block_styles)\n }, {\n 'title': 'Inline',\n 'items': self.get_styles(inline_styles)\n }, {\n 'title': 'Alignment',\n 'items': self.get_styles(alignment_styles)\n }, {\n 'title': 'Tables',\n 'items': self.get_styles(\n table_styles, 'classes', {'selector': 'table'})\n }]\n\n def get_tiny_config(self):\n settings = self.settings\n importcss_file_filter = '%s/++plone++static/tinymce-styles.css' % (\n self.nav_root_url\n )\n\n theme = self.get_theme()\n if theme and getattr(theme, 'tinymce_styles_css', None):\n importcss_file_filter += ',%s/%s' % (\n self.nav_root_url,\n theme.tinymce_styles_css.lstrip('/'))\n\n tiny_config = {\n 'resize': 'both' if settings.resizing else False,\n 'content_css': self.get_content_css(importcss_file_filter),\n 'plugins': [\n 'plonelink',\n 'ploneimage',\n 'importcss'\n ] + settings.plugins,\n 'external_plugins': {},\n 'toolbar': settings.toolbar,\n 'entity_encoding': settings.entity_encoding,\n 'importcss_append': True,\n 'importcss_file_filter': importcss_file_filter,\n 'browser_spellcheck': True\n }\n toolbar_additions = settings.custom_buttons or []\n\n if settings.editor_height:\n tiny_config['height'] = settings.editor_height\n if settings.autoresize:\n tiny_config['plugins'].append('autoresize')\n tiny_config['autoresize_max_height'] = 1000 # hard coded?\n if settings.editor_width:\n tiny_config['width'] = settings.editor_width\n\n # specific plugin options\n if 'contextmenu' in settings.plugins:\n tiny_config['contextmenu'] = \"plonelink ploneimage inserttable |\"\\\n \" cell row column deletetable\"\n\n if settings.libraries_spellchecker_choice == 'AtD':\n mtool = getToolByName(self.context, 'portal_membership')\n member = mtool.getAuthenticatedMember()\n member_id = member.getId()\n if member_id:\n if 'compat3x' not in tiny_config['plugins']:\n tiny_config['plugins'].append('compat3x')\n tiny_config['external_plugins']['AtD'] = (\n '{0}/++plone++static/tinymce-AtD-plugin/'\n 'editor_plugin.js'.format(self.nav_root_url)\n )\n # None when Anonymous User\n tiny_config['atd_rpc_id'] = 'plone-' + member_id\n tiny_config['atd_rpc_url'] = self.nav_root_url\n tiny_config['atd_show_types'] = ','.join(\n settings.libraries_atd_show_types\n )\n tiny_config['atd_ignore_strings'] = ','.join(\n settings.libraries_atd_ignore_strings\n )\n toolbar_additions.append('AtD')\n elif settings.libraries_spellchecker_choice == 'AtD':\n tiny_config['browser_spellcheck'] = True\n\n if toolbar_additions:\n tiny_config['toolbar'] += ' | {0}'.format(\n ' '.join(toolbar_additions)\n )\n\n for plugin in settings.custom_plugins or []:\n parts = plugin.split('|')\n if len(parts) != 2:\n continue\n tiny_config['external_plugins'][parts[0]] = parts[1]\n\n tiny_config['style_formats'] = self.get_all_style_formats()\n if settings.formats:\n try:\n tiny_config['formats'] = json.loads(settings.formats)\n except ValueError:\n pass\n\n if settings.menubar:\n tiny_config['menubar'] = settings.menubar\n if settings.menu:\n try:\n tiny_config['menu'] = json.loads(settings.menu)\n except ValueError:\n pass\n\n if hasattr(settings, 'templates') and settings.templates:\n try:\n 
tiny_config['templates'] = json.loads(settings.templates)\n except ValueError:\n pass\n\n # add safe_html settings, which are useed in backend for filtering:\n if not self.filter_settings.disable_filtering:\n valid_tags = self.filter_settings.valid_tags\n nasty_tags = self.filter_settings.nasty_tags\n custom_attributes = self.filter_settings.custom_attributes\n safe_attributes = [attr.decode() for attr in html.defs.safe_attrs]\n valid_attributes = safe_attributes + custom_attributes\n # valid_elements : 'a[href|target=_blank],strong/b,div[align],br'\n tiny_valid_elements = []\n for tag in valid_tags:\n tag_str = \"%s[%s]\" % (tag, \"|\".join(valid_attributes))\n tiny_valid_elements.append(tag_str)\n # We want to remove the nasty tag including the content in the\n # backend, so TinyMCE should allow them here.\n for tag in nasty_tags:\n tag_str = \"%s[%s]\" % (tag, \"|\".join(valid_attributes))\n tiny_valid_elements.append(tag_str)\n tiny_config['valid_elements'] = \",\".join(tiny_valid_elements)\n\n if settings.other_settings:\n try:\n tiny_config.update(json.loads(settings.other_settings))\n except ValueError:\n pass\n\n return tiny_config\n", "path": "Products/CMFPlone/patterns/tinymce.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom lxml import html\nfrom plone.app.layout.navigation.root import getNavigationRootObject\nfrom plone.app.theming.utils import theming_policy\nfrom plone.registry.interfaces import IRegistry\nfrom Products.CMFCore.utils import getToolByName\nfrom Products.CMFPlone.interfaces import IFilterSchema\nfrom Products.CMFPlone.interfaces import ITinyMCESchema\nfrom Products.CMFPlone.utils import get_portal\nfrom zope.component import getUtility\n\nimport json\n\n\nclass TinyMCESettingsGenerator(object):\n\n def __init__(self, context, request):\n self.context = context\n self.request = request\n self.settings = getUtility(IRegistry).forInterface(\n ITinyMCESchema,\n prefix=\"plone\",\n check=False\n )\n self.filter_settings = getUtility(IRegistry).forInterface(\n IFilterSchema,\n prefix=\"plone\",\n check=False\n )\n self.nav_root = getNavigationRootObject(\n self.context,\n get_portal(),\n )\n self.nav_root_url = self.nav_root.absolute_url()\n\n def get_theme(self):\n return theming_policy().get_theme()\n\n def get_content_css(self, style_css=''):\n files = [\n '{0}/++plone++static/plone-compiled.css'.format(self.nav_root_url)\n ]\n if style_css:\n files.extend(style_css.split(','))\n content_css = self.settings.content_css or []\n for url in content_css:\n if url and url.strip():\n files.append('/'.join([self.nav_root_url, url.strip()]))\n theme = self.get_theme()\n tinymce_content_css = getattr(theme, 'tinymce_content_css', None)\n if tinymce_content_css:\n for path in theme.tinymce_content_css.split(','):\n if path.startswith('http://') or path.startswith('https://'):\n files.append(path)\n else:\n files.append(self.nav_root_url + path)\n\n return ','.join(files)\n\n def get_style_format(self, txt, _type='format', base=None):\n parts = txt.strip().split('|')\n if len(parts) < 2:\n return\n if base is None:\n val = {}\n else:\n val = base.copy()\n val.update({\n 'title': parts[0],\n _type: parts[1]\n })\n if len(parts) > 2:\n val['icon'] = parts[2]\n return val\n\n def get_styles(self, styles, _type='format', base=None):\n result = []\n for style in styles:\n style = self.get_style_format(style, _type, base)\n if not style:\n continue\n result.append(style)\n return result\n\n def get_all_style_formats(self):\n header_styles = 
self.settings.header_styles or []\n block_styles = self.settings.block_styles or []\n inline_styles = self.settings.inline_styles or []\n alignment_styles = self.settings.alignment_styles or []\n table_styles = self.settings.table_styles or []\n return [{\n 'title': 'Headers',\n 'items': self.get_styles(header_styles)\n }, {\n 'title': 'Block',\n 'items': self.get_styles(block_styles)\n }, {\n 'title': 'Inline',\n 'items': self.get_styles(inline_styles)\n }, {\n 'title': 'Alignment',\n 'items': self.get_styles(alignment_styles)\n }, {\n 'title': 'Tables',\n 'items': self.get_styles(\n table_styles, 'classes', {'selector': 'table'})\n }]\n\n def get_tiny_config(self):\n settings = self.settings\n importcss_file_filter = '%s/++plone++static/tinymce-styles.css' % (\n self.nav_root_url\n )\n\n theme = self.get_theme()\n if theme and getattr(theme, 'tinymce_styles_css', None):\n importcss_file_filter += ',%s/%s' % (\n self.nav_root_url,\n theme.tinymce_styles_css.lstrip('/'))\n\n tiny_config = {\n 'resize': 'both' if settings.resizing else False,\n 'content_css': self.get_content_css(importcss_file_filter),\n 'plugins': [\n 'plonelink',\n 'ploneimage',\n 'importcss'\n ] + settings.plugins,\n 'external_plugins': {},\n 'toolbar': settings.toolbar,\n 'entity_encoding': settings.entity_encoding,\n 'importcss_append': True,\n 'importcss_file_filter': importcss_file_filter,\n 'browser_spellcheck': True\n }\n toolbar_additions = settings.custom_buttons or []\n\n if settings.editor_height:\n tiny_config['height'] = settings.editor_height\n if settings.autoresize:\n tiny_config['plugins'].append('autoresize')\n tiny_config['autoresize_max_height'] = 1000 # hard coded?\n if settings.editor_width:\n tiny_config['width'] = settings.editor_width\n\n # specific plugin options\n if 'contextmenu' in settings.plugins:\n tiny_config['contextmenu'] = \"plonelink ploneimage inserttable |\"\\\n \" cell row column deletetable\"\n\n if settings.libraries_spellchecker_choice == 'AtD':\n mtool = getToolByName(self.context, 'portal_membership')\n member = mtool.getAuthenticatedMember()\n member_id = member.getId()\n if member_id:\n if 'compat3x' not in tiny_config['plugins']:\n tiny_config['plugins'].append('compat3x')\n tiny_config['external_plugins']['AtD'] = (\n '{0}/++plone++static/tinymce-AtD-plugin/'\n 'editor_plugin.js'.format(self.nav_root_url)\n )\n # None when Anonymous User\n tiny_config['atd_rpc_id'] = 'plone-' + member_id\n tiny_config['atd_rpc_url'] = self.nav_root_url\n tiny_config['atd_show_types'] = ','.join(\n settings.libraries_atd_show_types\n )\n tiny_config['atd_ignore_strings'] = ','.join(\n settings.libraries_atd_ignore_strings\n )\n toolbar_additions.append('AtD')\n elif settings.libraries_spellchecker_choice == 'AtD':\n tiny_config['browser_spellcheck'] = True\n\n if toolbar_additions:\n tiny_config['toolbar'] += ' | {0}'.format(\n ' '.join(toolbar_additions)\n )\n\n for plugin in settings.custom_plugins or []:\n parts = plugin.split('|')\n if len(parts) != 2:\n continue\n tiny_config['external_plugins'][parts[0]] = parts[1]\n\n tiny_config['style_formats'] = self.get_all_style_formats()\n if settings.formats:\n try:\n tiny_config['formats'] = json.loads(settings.formats)\n except ValueError:\n pass\n\n if settings.menubar:\n tiny_config['menubar'] = settings.menubar\n if settings.menu:\n try:\n tiny_config['menu'] = json.loads(settings.menu)\n except ValueError:\n pass\n\n if hasattr(settings, 'templates') and settings.templates:\n try:\n tiny_config['templates'] = 
json.loads(settings.templates)\n except ValueError:\n pass\n\n # add safe_html settings, which are useed in backend for filtering:\n if not self.filter_settings.disable_filtering:\n valid_tags = self.filter_settings.valid_tags\n nasty_tags = self.filter_settings.nasty_tags\n custom_attributes = self.filter_settings.custom_attributes\n safe_attributes = [attr.decode() for attr in html.defs.safe_attrs]\n valid_attributes = safe_attributes + custom_attributes\n # valid_elements : 'a[href|target=_blank],strong/b,div[align],br'\n tiny_valid_elements = []\n for tag in valid_tags:\n tag_str = \"%s[%s]\" % (tag, \"|\".join(valid_attributes))\n tiny_valid_elements.append(tag_str)\n # We want to remove the nasty tag including the content in the\n # backend, so TinyMCE should allow them here.\n for tag in nasty_tags:\n tag_str = \"%s[%s]\" % (tag, \"|\".join(valid_attributes))\n tiny_valid_elements.append(tag_str)\n tiny_config['valid_elements'] = \",\".join(tiny_valid_elements)\n\n if settings.other_settings:\n try:\n tiny_config.update(json.loads(settings.other_settings))\n except ValueError:\n pass\n\n return tiny_config\n", "path": "Products/CMFPlone/patterns/tinymce.py"}]} | 2,813 | 172 |
gh_patches_debug_36331 | rasdani/github-patches | git_diff | streamlink__streamlink-3142 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sportschau plugin fails with "Unable to parse manifest XML" error
## Plugin Issue
- [x ] This is a plugin issue and I have read the contribution guidelines.
### Description
streamlink errors out when trying to watch a stream on sportschau.de, e.g. https://www.sportschau.de/tourdefrance/live/videostream-livestream---die--etappe-der-tour-de-france-nach-privas-100.html. The error is: "error: Unable to parse manifest XML: syntax error: line 1, column 0 (b'#EXTM3U\n#EXT-X-VERSION:3\n#EXT-X ...)"
### Reproduction steps / Explicit stream URLs to test
1. streamlink "https://www.sportschau.de/tourdefrance/live/videostream-livestream---die--etappe-der-tour-de-france-nach-privas-100.html"
### Log output
```
[14:25:23,464][cli][debug] OS: Linux-5.8.4-x86_64-with-glibc2.2.5
[14:25:23,464][cli][debug] Python: 3.8.5
[14:25:23,464][cli][debug] Streamlink: 1.5.0
[14:25:23,464][cli][debug] Requests(2.24.0), Socks(1.7.1), Websocket(0.57.0)
[14:25:23,465][cli][info] Found matching plugin sportschau for URL https://www.sportschau.de/tourdefrance/live/videostream-livestream---die--etappe-der-tour-de-france-nach-privas-100.html
[14:25:23,734][plugin.sportschau][info] Found player js http://deviceids-medp.wdr.de/ondemand/221/2214170.js
error: Unable to parse manifest XML: syntax error: line 1, column 0 (b'#EXTM3U\n#EXT-X-VERSION:3\n#EXT-X ...)
```
### Additional comments, screenshots, etc.
I'm not sure I understand the cause of the error, especially as the problematic part seems truncated. This is what the .m3u8 playlist looks like:
```
#EXTM3U
#EXT-X-VERSION:3
#EXT-X-INDEPENDENT-SEGMENTS
#EXT-X-STREAM-INF:BANDWIDTH=5388416,AVERAGE-BANDWIDTH=4048000,CODECS="avc1.640020,mp4a.40.2",RESOLUTION=1280x720,FRAME-RATE=50.000
https://ardevent2.akamaized.net/hls/live/681512/ardevent2_geo/master_3680.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=5388416,AVERAGE-BANDWIDTH=4048000,CODECS="avc1.640020,mp4a.40.2",RESOLUTION=1280x720,FRAME-RATE=50.000
https://ardevent2.akamaized.net/hls/live/681512-b/ardevent2_geo/master_3680.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=2758800,AVERAGE-BANDWIDTH=2085600,CODECS="avc1.4d401f,mp4a.40.2",RESOLUTION=960x540,FRAME-RATE=50.000
https://ardevent2.akamaized.net/hls/live/681512/ardevent2_geo/master_1896.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=2758800,AVERAGE-BANDWIDTH=2085600,CODECS="avc1.4d401f,mp4a.40.2",RESOLUTION=960x540,FRAME-RATE=50.000
https://ardevent2.akamaized.net/hls/live/681512-b/ardevent2_geo/master_1896.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=1614976,AVERAGE-BANDWIDTH=1232000,CODECS="avc1.4d401f,mp4a.40.2",RESOLUTION=640x360,FRAME-RATE=50.000
https://ardevent2.akamaized.net/hls/live/681512/ardevent2_geo/master_1120.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=1614976,AVERAGE-BANDWIDTH=1232000,CODECS="avc1.4d401f,mp4a.40.2",RESOLUTION=640x360,FRAME-RATE=50.000
https://ardevent2.akamaized.net/hls/live/681512-b/ardevent2_geo/master_1120.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=860288,AVERAGE-BANDWIDTH=668800,CODECS="avc1.77.30,mp4a.40.2",RESOLUTION=512x288,FRAME-RATE=50.000
https://ardevent2.akamaized.net/hls/live/681512/ardevent2_geo/master_608.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=860288,AVERAGE-BANDWIDTH=668800,CODECS="avc1.77.30,mp4a.40.2",RESOLUTION=512x288,FRAME-RATE=50.000
https://ardevent2.akamaized.net/hls/live/681512-b/ardevent2_geo/master_608.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=482944,AVERAGE-BANDWIDTH=387200,CODECS="avc1.66.30,mp4a.40.2",RESOLUTION=480x270,FRAME-RATE=50.000
https://ardevent2.akamaized.net/hls/live/681512/ardevent2_geo/master_352.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=482944,AVERAGE-BANDWIDTH=387200,CODECS="avc1.66.30,mp4a.40.2",RESOLUTION=480x270,FRAME-RATE=50.000
https://ardevent2.akamaized.net/hls/live/681512-b/ardevent2_geo/master_352.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=294272,AVERAGE-BANDWIDTH=246400,CODECS="avc1.42c015,mp4a.40.2",RESOLUTION=320x180,FRAME-RATE=50.000
https://ardevent2.akamaized.net/hls/live/681512/ardevent2_geo/master_224.m3u8
#EXT-X-STREAM-INF:BANDWIDTH=294272,AVERAGE-BANDWIDTH=246400,CODECS="avc1.42c015,mp4a.40.2",RESOLUTION=320x180,FRAME-RATE=50.000
https://ardevent2.akamaized.net/hls/live/681512-b/ardevent2_geo/master_224.m3u8
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/sportschau.py`
Content:
```
1 import re
2 import json
3
4 from streamlink.plugin import Plugin
5 from streamlink.stream import HDSStream
6 from streamlink.utils import update_scheme
7
8 _url_re = re.compile(r"http(s)?://(\w+\.)?sportschau.de/")
9 _player_js = re.compile(r"https?://deviceids-medp.wdr.de/ondemand/.*\.js")
10
11
12 class sportschau(Plugin):
13 @classmethod
14 def can_handle_url(cls, url):
15 return _url_re.match(url)
16
17 def _get_streams(self):
18 res = self.session.http.get(self.url)
19 match = _player_js.search(res.text)
20 if match:
21 player_js = match.group(0)
22 self.logger.info("Found player js {0}", player_js)
23 else:
24 self.logger.info("Didn't find player js. Probably this page doesn't contain a video")
25 return
26
27 res = self.session.http.get(player_js)
28
29 jsonp_start = res.text.find('(') + 1
30 jsonp_end = res.text.rfind(')')
31
32 if jsonp_start <= 0 or jsonp_end <= 0:
33 self.logger.info("Couldn't extract json metadata from player.js: {0}", player_js)
34 return
35
36 json_s = res.text[jsonp_start:jsonp_end]
37
38 stream_metadata = json.loads(json_s)
39
40 hds_url = stream_metadata['mediaResource']['dflt']['videoURL']
41 hds_url = update_scheme(self.url, hds_url)
42
43 return HDSStream.parse_manifest(self.session, hds_url).items()
44
45
46 __plugin__ = sportschau
47
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/sportschau.py b/src/streamlink/plugins/sportschau.py
--- a/src/streamlink/plugins/sportschau.py
+++ b/src/streamlink/plugins/sportschau.py
@@ -1,46 +1,52 @@
-import re
-import json
-
-from streamlink.plugin import Plugin
-from streamlink.stream import HDSStream
-from streamlink.utils import update_scheme
-
-_url_re = re.compile(r"http(s)?://(\w+\.)?sportschau.de/")
-_player_js = re.compile(r"https?://deviceids-medp.wdr.de/ondemand/.*\.js")
-
-
-class sportschau(Plugin):
- @classmethod
- def can_handle_url(cls, url):
- return _url_re.match(url)
-
- def _get_streams(self):
- res = self.session.http.get(self.url)
- match = _player_js.search(res.text)
- if match:
- player_js = match.group(0)
- self.logger.info("Found player js {0}", player_js)
- else:
- self.logger.info("Didn't find player js. Probably this page doesn't contain a video")
- return
-
- res = self.session.http.get(player_js)
-
- jsonp_start = res.text.find('(') + 1
- jsonp_end = res.text.rfind(')')
-
- if jsonp_start <= 0 or jsonp_end <= 0:
- self.logger.info("Couldn't extract json metadata from player.js: {0}", player_js)
- return
-
- json_s = res.text[jsonp_start:jsonp_end]
-
- stream_metadata = json.loads(json_s)
-
- hds_url = stream_metadata['mediaResource']['dflt']['videoURL']
- hds_url = update_scheme(self.url, hds_url)
-
- return HDSStream.parse_manifest(self.session, hds_url).items()
-
-
-__plugin__ = sportschau
+import logging
+import re
+
+from streamlink.plugin import Plugin
+from streamlink.plugin.api import validate
+from streamlink.stream import HLSStream
+from streamlink.utils import parse_json, update_scheme
+
+log = logging.getLogger(__name__)
+
+
+class Sportschau(Plugin):
+ _re_url = re.compile(r"https?://(?:\w+\.)*sportschau.de/")
+
+ _re_player = re.compile(r"https?:(//deviceids-medp.wdr.de/ondemand/\S+\.js)")
+ _re_json = re.compile(r"\$mediaObject.jsonpHelper.storeAndPlay\(({.+})\);?")
+
+ _schema_player = validate.Schema(
+ validate.transform(_re_player.search),
+ validate.any(None, validate.Schema(
+ validate.get(1),
+ validate.transform(lambda url: update_scheme("https:", url))
+ ))
+ )
+ _schema_json = validate.Schema(
+ validate.transform(_re_json.match),
+ validate.get(1),
+ validate.transform(parse_json),
+ validate.get("mediaResource"),
+ validate.get("dflt"),
+ validate.get("videoURL"),
+ validate.transform(lambda url: update_scheme("https:", url))
+ )
+
+ @classmethod
+ def can_handle_url(cls, url):
+ return cls._re_url.match(url) is not None
+
+ def _get_streams(self):
+ player_js = self.session.http.get(self.url, schema=self._schema_player)
+ if not player_js:
+ return
+
+ log.debug("Found player js {0}".format(player_js))
+
+ hls_url = self.session.http.get(player_js, schema=self._schema_json)
+
+ for stream in HLSStream.parse_variant_playlist(self.session, hls_url).items():
+ yield stream
+
+
+__plugin__ = Sportschau
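
The core of the rewrite is the switch of stream backends: the video URL resolved from the player metadata is an HLS master playlist (the `#EXTM3U` listing quoted in the issue), so feeding it to the HDS/F4M XML manifest parser is what produced the "Unable to parse manifest XML" error, while `HLSStream.parse_variant_playlist` handles it directly. A rough usage sketch of that call outside the plugin (the playlist URL is a stand-in; any reachable variant playlist would do):

```python
from streamlink import Streamlink
from streamlink.stream import HLSStream

session = Streamlink()
# Stand-in for the URL resolved from the player JS metadata
hls_url = "https://ardevent2.akamaized.net/hls/live/681512/ardevent2_geo/master.m3u8"

# Returns a dict mapping quality names (e.g. "720p") to HLSStream objects
streams = HLSStream.parse_variant_playlist(session, hls_url)
for name, stream in streams.items():
    print(name, stream)
```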
| {"golden_diff": "diff --git a/src/streamlink/plugins/sportschau.py b/src/streamlink/plugins/sportschau.py\n--- a/src/streamlink/plugins/sportschau.py\n+++ b/src/streamlink/plugins/sportschau.py\n@@ -1,46 +1,52 @@\n-import re\r\n-import json\r\n-\r\n-from streamlink.plugin import Plugin\r\n-from streamlink.stream import HDSStream\r\n-from streamlink.utils import update_scheme\r\n-\r\n-_url_re = re.compile(r\"http(s)?://(\\w+\\.)?sportschau.de/\")\r\n-_player_js = re.compile(r\"https?://deviceids-medp.wdr.de/ondemand/.*\\.js\")\r\n-\r\n-\r\n-class sportschau(Plugin):\r\n- @classmethod\r\n- def can_handle_url(cls, url):\r\n- return _url_re.match(url)\r\n-\r\n- def _get_streams(self):\r\n- res = self.session.http.get(self.url)\r\n- match = _player_js.search(res.text)\r\n- if match:\r\n- player_js = match.group(0)\r\n- self.logger.info(\"Found player js {0}\", player_js)\r\n- else:\r\n- self.logger.info(\"Didn't find player js. Probably this page doesn't contain a video\")\r\n- return\r\n-\r\n- res = self.session.http.get(player_js)\r\n-\r\n- jsonp_start = res.text.find('(') + 1\r\n- jsonp_end = res.text.rfind(')')\r\n-\r\n- if jsonp_start <= 0 or jsonp_end <= 0:\r\n- self.logger.info(\"Couldn't extract json metadata from player.js: {0}\", player_js)\r\n- return\r\n-\r\n- json_s = res.text[jsonp_start:jsonp_end]\r\n-\r\n- stream_metadata = json.loads(json_s)\r\n-\r\n- hds_url = stream_metadata['mediaResource']['dflt']['videoURL']\r\n- hds_url = update_scheme(self.url, hds_url)\r\n-\r\n- return HDSStream.parse_manifest(self.session, hds_url).items()\r\n-\r\n-\r\n-__plugin__ = sportschau\r\n+import logging\n+import re\n+\n+from streamlink.plugin import Plugin\n+from streamlink.plugin.api import validate\n+from streamlink.stream import HLSStream\n+from streamlink.utils import parse_json, update_scheme\n+\n+log = logging.getLogger(__name__)\n+\n+\n+class Sportschau(Plugin):\n+ _re_url = re.compile(r\"https?://(?:\\w+\\.)*sportschau.de/\")\n+\n+ _re_player = re.compile(r\"https?:(//deviceids-medp.wdr.de/ondemand/\\S+\\.js)\")\n+ _re_json = re.compile(r\"\\$mediaObject.jsonpHelper.storeAndPlay\\(({.+})\\);?\")\n+\n+ _schema_player = validate.Schema(\n+ validate.transform(_re_player.search),\n+ validate.any(None, validate.Schema(\n+ validate.get(1),\n+ validate.transform(lambda url: update_scheme(\"https:\", url))\n+ ))\n+ )\n+ _schema_json = validate.Schema(\n+ validate.transform(_re_json.match),\n+ validate.get(1),\n+ validate.transform(parse_json),\n+ validate.get(\"mediaResource\"),\n+ validate.get(\"dflt\"),\n+ validate.get(\"videoURL\"),\n+ validate.transform(lambda url: update_scheme(\"https:\", url))\n+ )\n+\n+ @classmethod\n+ def can_handle_url(cls, url):\n+ return cls._re_url.match(url) is not None\n+\n+ def _get_streams(self):\n+ player_js = self.session.http.get(self.url, schema=self._schema_player)\n+ if not player_js:\n+ return\n+\n+ log.debug(\"Found player js {0}\".format(player_js))\n+\n+ hls_url = self.session.http.get(player_js, schema=self._schema_json)\n+\n+ for stream in HLSStream.parse_variant_playlist(self.session, hls_url).items():\n+ yield stream\n+\n+\n+__plugin__ = Sportschau\n", "issue": "sportschau plugin fails with \"Unable to parse manifest XML\" error\n\r\n## Plugin Issue\r\n\r\n\r\n- [x ] This is a plugin issue and I have read the contribution guidelines.\r\n\r\n\r\n### Description\r\n\r\nstreamlink errors out when trying to watch a stream on sportschau.de, e.g. 
https://www.sportschau.de/tourdefrance/live/videostream-livestream---die--etappe-der-tour-de-france-nach-privas-100.html. It errors out with: \"error: Unable to parse manifest XML: syntax error: line 1, column 0 (b'#EXTM3U\\n#EXT-X-VERSION:3\\n#EXT-X ...)\"\r\n\r\n### Reproduction steps / Explicit stream URLs to test\r\n\r\n1. streamlink \"https://www.sportschau.de/tourdefrance/live/videostream-livestream---die--etappe-der-tour-de-france-nach-privas-100.html\"\r\n\r\n\r\n### Log output\r\n\r\n```\r\n[14:25:23,464][cli][debug] OS: Linux-5.8.4-x86_64-with-glibc2.2.5\r\n[14:25:23,464][cli][debug] Python: 3.8.5\r\n[14:25:23,464][cli][debug] Streamlink: 1.5.0\r\n[14:25:23,464][cli][debug] Requests(2.24.0), Socks(1.7.1), Websocket(0.57.0)\r\n[14:25:23,465][cli][info] Found matching plugin sportschau for URL https://www.sportschau.de/tourdefrance/live/videostream-livestream---die--etappe-der-tour-de-france-nach-privas-100.html\r\n[14:25:23,734][plugin.sportschau][info] Found player js http://deviceids-medp.wdr.de/ondemand/221/2214170.js\r\nerror: Unable to parse manifest XML: syntax error: line 1, column 0 (b'#EXTM3U\\n#EXT-X-VERSION:3\\n#EXT-X ...)\r\n```\r\n\r\n\r\n### Additional comments, screenshots, etc.\r\n\r\nNot sure that I understand the cause of the error, especially as the problematic part seems truncated. This is what the .m3u file looks like:\r\n\r\n```\r\n#EXTM3U\r\n#EXT-X-VERSION:3\r\n#EXT-X-INDEPENDENT-SEGMENTS\r\n#EXT-X-STREAM-INF:BANDWIDTH=5388416,AVERAGE-BANDWIDTH=4048000,CODECS=\"avc1.640020,mp4a.40.2\",RESOLUTION=1280x720,FRAME-RATE=50.000\r\nhttps://ardevent2.akamaized.net/hls/live/681512/ardevent2_geo/master_3680.m3u8\r\n#EXT-X-STREAM-INF:BANDWIDTH=5388416,AVERAGE-BANDWIDTH=4048000,CODECS=\"avc1.640020,mp4a.40.2\",RESOLUTION=1280x720,FRAME-RATE=50.000\r\nhttps://ardevent2.akamaized.net/hls/live/681512-b/ardevent2_geo/master_3680.m3u8\r\n#EXT-X-STREAM-INF:BANDWIDTH=2758800,AVERAGE-BANDWIDTH=2085600,CODECS=\"avc1.4d401f,mp4a.40.2\",RESOLUTION=960x540,FRAME-RATE=50.000\r\nhttps://ardevent2.akamaized.net/hls/live/681512/ardevent2_geo/master_1896.m3u8\r\n#EXT-X-STREAM-INF:BANDWIDTH=2758800,AVERAGE-BANDWIDTH=2085600,CODECS=\"avc1.4d401f,mp4a.40.2\",RESOLUTION=960x540,FRAME-RATE=50.000\r\nhttps://ardevent2.akamaized.net/hls/live/681512-b/ardevent2_geo/master_1896.m3u8\r\n#EXT-X-STREAM-INF:BANDWIDTH=1614976,AVERAGE-BANDWIDTH=1232000,CODECS=\"avc1.4d401f,mp4a.40.2\",RESOLUTION=640x360,FRAME-RATE=50.000\r\nhttps://ardevent2.akamaized.net/hls/live/681512/ardevent2_geo/master_1120.m3u8\r\n#EXT-X-STREAM-INF:BANDWIDTH=1614976,AVERAGE-BANDWIDTH=1232000,CODECS=\"avc1.4d401f,mp4a.40.2\",RESOLUTION=640x360,FRAME-RATE=50.000\r\nhttps://ardevent2.akamaized.net/hls/live/681512-b/ardevent2_geo/master_1120.m3u8\r\n#EXT-X-STREAM-INF:BANDWIDTH=860288,AVERAGE-BANDWIDTH=668800,CODECS=\"avc1.77.30,mp4a.40.2\",RESOLUTION=512x288,FRAME-RATE=50.000\r\nhttps://ardevent2.akamaized.net/hls/live/681512/ardevent2_geo/master_608.m3u8\r\n#EXT-X-STREAM-INF:BANDWIDTH=860288,AVERAGE-BANDWIDTH=668800,CODECS=\"avc1.77.30,mp4a.40.2\",RESOLUTION=512x288,FRAME-RATE=50.000\r\nhttps://ardevent2.akamaized.net/hls/live/681512-b/ardevent2_geo/master_608.m3u8\r\n#EXT-X-STREAM-INF:BANDWIDTH=482944,AVERAGE-BANDWIDTH=387200,CODECS=\"avc1.66.30,mp4a.40.2\",RESOLUTION=480x270,FRAME-RATE=50.000\r\nhttps://ardevent2.akamaized.net/hls/live/681512/ardevent2_geo/master_352.m3u8\r\n#EXT-X-STREAM-INF:BANDWIDTH=482944,AVERAGE-BANDWIDTH=387200,CODECS=\"avc1.66.30,mp4a.40.2\",RESOLUTION=480x270,FRAME-RATE=50.000\r\nhttps://ardevent2.akamaiz
ed.net/hls/live/681512-b/ardevent2_geo/master_352.m3u8\r\n#EXT-X-STREAM-INF:BANDWIDTH=294272,AVERAGE-BANDWIDTH=246400,CODECS=\"avc1.42c015,mp4a.40.2\",RESOLUTION=320x180,FRAME-RATE=50.000\r\nhttps://ardevent2.akamaized.net/hls/live/681512/ardevent2_geo/master_224.m3u8\r\n#EXT-X-STREAM-INF:BANDWIDTH=294272,AVERAGE-BANDWIDTH=246400,CODECS=\"avc1.42c015,mp4a.40.2\",RESOLUTION=320x180,FRAME-RATE=50.000\r\nhttps://ardevent2.akamaized.net/hls/live/681512-b/ardevent2_geo/master_224.m3u8\r\n```\n", "before_files": [{"content": "import re\r\nimport json\r\n\r\nfrom streamlink.plugin import Plugin\r\nfrom streamlink.stream import HDSStream\r\nfrom streamlink.utils import update_scheme\r\n\r\n_url_re = re.compile(r\"http(s)?://(\\w+\\.)?sportschau.de/\")\r\n_player_js = re.compile(r\"https?://deviceids-medp.wdr.de/ondemand/.*\\.js\")\r\n\r\n\r\nclass sportschau(Plugin):\r\n @classmethod\r\n def can_handle_url(cls, url):\r\n return _url_re.match(url)\r\n\r\n def _get_streams(self):\r\n res = self.session.http.get(self.url)\r\n match = _player_js.search(res.text)\r\n if match:\r\n player_js = match.group(0)\r\n self.logger.info(\"Found player js {0}\", player_js)\r\n else:\r\n self.logger.info(\"Didn't find player js. Probably this page doesn't contain a video\")\r\n return\r\n\r\n res = self.session.http.get(player_js)\r\n\r\n jsonp_start = res.text.find('(') + 1\r\n jsonp_end = res.text.rfind(')')\r\n\r\n if jsonp_start <= 0 or jsonp_end <= 0:\r\n self.logger.info(\"Couldn't extract json metadata from player.js: {0}\", player_js)\r\n return\r\n\r\n json_s = res.text[jsonp_start:jsonp_end]\r\n\r\n stream_metadata = json.loads(json_s)\r\n\r\n hds_url = stream_metadata['mediaResource']['dflt']['videoURL']\r\n hds_url = update_scheme(self.url, hds_url)\r\n\r\n return HDSStream.parse_manifest(self.session, hds_url).items()\r\n\r\n\r\n__plugin__ = sportschau\r\n", "path": "src/streamlink/plugins/sportschau.py"}], "after_files": [{"content": "import logging\nimport re\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream import HLSStream\nfrom streamlink.utils import parse_json, update_scheme\n\nlog = logging.getLogger(__name__)\n\n\nclass Sportschau(Plugin):\n _re_url = re.compile(r\"https?://(?:\\w+\\.)*sportschau.de/\")\n\n _re_player = re.compile(r\"https?:(//deviceids-medp.wdr.de/ondemand/\\S+\\.js)\")\n _re_json = re.compile(r\"\\$mediaObject.jsonpHelper.storeAndPlay\\(({.+})\\);?\")\n\n _schema_player = validate.Schema(\n validate.transform(_re_player.search),\n validate.any(None, validate.Schema(\n validate.get(1),\n validate.transform(lambda url: update_scheme(\"https:\", url))\n ))\n )\n _schema_json = validate.Schema(\n validate.transform(_re_json.match),\n validate.get(1),\n validate.transform(parse_json),\n validate.get(\"mediaResource\"),\n validate.get(\"dflt\"),\n validate.get(\"videoURL\"),\n validate.transform(lambda url: update_scheme(\"https:\", url))\n )\n\n @classmethod\n def can_handle_url(cls, url):\n return cls._re_url.match(url) is not None\n\n def _get_streams(self):\n player_js = self.session.http.get(self.url, schema=self._schema_player)\n if not player_js:\n return\n\n log.debug(\"Found player js {0}\".format(player_js))\n\n hls_url = self.session.http.get(player_js, schema=self._schema_json)\n\n for stream in HLSStream.parse_variant_playlist(self.session, hls_url).items():\n yield stream\n\n\n__plugin__ = Sportschau\n", "path": "src/streamlink/plugins/sportschau.py"}]} | 2,604 | 848 |
gh_patches_debug_38316 | rasdani/github-patches | git_diff | streamlink__streamlink-5946 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
plugins.turkuvaz: no data on minikacocuk.com.tr
### Checklist
- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)
- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)
- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)
- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)
### Streamlink version
6.7.2
### Description
### Debug log
```text
Not working on Python 3!!!! "Minikacocuk" channel, please help.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/turkuvaz.py`
Content:
```
1 """
2 $description Turkish live TV channels from Turkuvaz Media Group, including Ahaber, ATV, Minika COCUK and MinikaGO.
3 $url a2tv.com.tr
4 $url ahaber.com.tr
5 $url anews.com.tr
6 $url apara.com.tr
7 $url aspor.com.tr
8 $url atv.com.tr
9 $url atvavrupa.tv
10 $url minikacocuk.com.tr
11 $url minikago.com.tr
12 $url vavtv.com.tr
13 $type live, vod
14 $metadata id
15 $metadata title
16 $region various
17 """
18
19 import logging
20 import re
21
22 from streamlink.plugin import Plugin, pluginmatcher
23 from streamlink.plugin.api import validate
24 from streamlink.stream.hls import HLSStream
25
26
27 log = logging.getLogger(__name__)
28
29
30 @pluginmatcher(re.compile(r"""
31 https?://(?:www\.)?
32 (?:
33 atvavrupa\.tv
34 |
35 (?:a2tv|ahaber|anews|apara|aspor|atv|minikacocuk|minikago|vavtv)\.com\.tr
36 )
37 """, re.VERBOSE))
38 class Turkuvaz(Plugin):
39 def _get_streams(self):
40 _find_and_get_attrs = validate.Schema(
41 validate.xml_find(".//div[@data-videoid][@data-websiteid]"),
42 validate.union_get("data-videoid", "data-websiteid"),
43 )
44
45 id_data = self.session.http.get(
46 self.url,
47 schema=validate.Schema(
48 validate.parse_html(),
49 validate.any(
50 _find_and_get_attrs,
51 validate.all(
52 validate.xml_xpath_string(
53 ".//script[contains(text(),'data-videoid') and contains(text(),'data-websiteid')]/text()",
54 ),
55 validate.none_or_all(
56 str,
57 validate.regex(re.compile(r"""var\s+tmdPlayer\s*=\s*(?P<q>["'])(.*?)(?P=q)""")),
58 validate.get(0),
59 validate.parse_html(),
60 _find_and_get_attrs,
61 ),
62 ),
63 ),
64 ),
65 )
66
67 if not id_data:
68 return
69
70 video_id, website_id = id_data
71 log.debug(f"video_id={video_id}")
72 log.debug(f"website_id={website_id}")
73
74 self.id, self.title, hls_url = self.session.http.get(
75 f"https://videojs.tmgrup.com.tr/getvideo/{website_id}/{video_id}",
76 schema=validate.Schema(
77 validate.parse_json(),
78 {
79 "success": True,
80 "video": {
81 "VideoId": str,
82 "Title": str,
83 "VideoSmilUrl": validate.url(),
84 },
85 },
86 validate.get("video"),
87 validate.union_get("VideoId", "Title", "VideoSmilUrl"),
88 ),
89 )
90 log.debug(f"hls_url={hls_url}")
91
92 secure_hls_url = self.session.http.get(
93 "https://securevideotoken.tmgrup.com.tr/webtv/secure",
94 params=f"url={hls_url}",
95 headers={"Referer": self.url},
96 schema=validate.Schema(
97 validate.parse_json(),
98 {
99 "Success": True,
100 "Url": validate.url(),
101 },
102 validate.get("Url"),
103 ),
104 )
105 log.debug(f"secure_hls_url={secure_hls_url}")
106
107 if secure_hls_url:
108 return HLSStream.parse_variant_playlist(self.session, secure_hls_url)
109
110
111 __plugin__ = Turkuvaz
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/src/streamlink/plugins/turkuvaz.py b/src/streamlink/plugins/turkuvaz.py
--- a/src/streamlink/plugins/turkuvaz.py
+++ b/src/streamlink/plugins/turkuvaz.py
@@ -36,8 +36,19 @@
)
""", re.VERBOSE))
class Turkuvaz(Plugin):
+ _VIDEOID_LIVE = "00000000-0000-0000-0000-000000000000"
+
+ # hardcoded in https://i.tmgrup.com.tr/videojs/js/tmdplayersetup.js?v=651
+ # (via https://www.minikacocuk.com.tr/webtv/canli-yayin)
+ _MAPPING_WEBSITEID_HLSURL = {
+ "9BBE055A-4CF6-4BC3-A675-D40E89B55B91": "https://trkvz.daioncdn.net/aspor/aspor.m3u8?ce=3&app=45f847c4-04e8-419a-a561-2ebf87084765",
+ "0C1BC8FF-C3B1-45BE-A95B-F7BB9C8B03ED": "https://trkvz.daioncdn.net/a2tv/a2tv.m3u8?ce=3&app=59363a60-be96-4f73-9eff-355d0ff2c758",
+ "AAE2E325-4EAE-45B7-B017-26FD7DDB6CE4": "https://trkvz.daioncdn.net/minikago/minikago.m3u8?app=web&ce=3",
+ "01ED59F2-4067-4945-8204-45F6C6DB4045": "https://trkvz.daioncdn.net/minikago_cocuk/minikago_cocuk.m3u8?app=web&ce=3",
+ }
+
def _get_streams(self):
- _find_and_get_attrs = validate.Schema(
+ _find_and_get_attrs = validate.all(
validate.xml_find(".//div[@data-videoid][@data-websiteid]"),
validate.union_get("data-videoid", "data-websiteid"),
)
@@ -68,8 +79,8 @@
return
video_id, website_id = id_data
- log.debug(f"video_id={video_id}")
- log.debug(f"website_id={website_id}")
+ log.debug(f"{video_id=}")
+ log.debug(f"{website_id=}")
self.id, self.title, hls_url = self.session.http.get(
f"https://videojs.tmgrup.com.tr/getvideo/{website_id}/{video_id}",
@@ -87,11 +98,14 @@
validate.union_get("VideoId", "Title", "VideoSmilUrl"),
),
)
- log.debug(f"hls_url={hls_url}")
+
+ if video_id == self._VIDEOID_LIVE:
+ hls_url = self._MAPPING_WEBSITEID_HLSURL.get(website_id.upper(), hls_url)
+ log.debug(f"{hls_url=}")
secure_hls_url = self.session.http.get(
"https://securevideotoken.tmgrup.com.tr/webtv/secure",
- params=f"url={hls_url}",
+ params={"url": hls_url},
headers={"Referer": self.url},
schema=validate.Schema(
validate.parse_json(),
@@ -102,7 +116,7 @@
validate.get("Url"),
),
)
- log.debug(f"secure_hls_url={secure_hls_url}")
+ log.debug(f"{secure_hls_url=}")
if secure_hls_url:
return HLSStream.parse_variant_playlist(self.session, secure_hls_url)
| {"golden_diff": "diff --git a/src/streamlink/plugins/turkuvaz.py b/src/streamlink/plugins/turkuvaz.py\n--- a/src/streamlink/plugins/turkuvaz.py\n+++ b/src/streamlink/plugins/turkuvaz.py\n@@ -36,8 +36,19 @@\n )\n \"\"\", re.VERBOSE))\n class Turkuvaz(Plugin):\n+ _VIDEOID_LIVE = \"00000000-0000-0000-0000-000000000000\"\n+\n+ # hardcoded in https://i.tmgrup.com.tr/videojs/js/tmdplayersetup.js?v=651\n+ # (via https://www.minikacocuk.com.tr/webtv/canli-yayin)\n+ _MAPPING_WEBSITEID_HLSURL = {\n+ \"9BBE055A-4CF6-4BC3-A675-D40E89B55B91\": \"https://trkvz.daioncdn.net/aspor/aspor.m3u8?ce=3&app=45f847c4-04e8-419a-a561-2ebf87084765\",\n+ \"0C1BC8FF-C3B1-45BE-A95B-F7BB9C8B03ED\": \"https://trkvz.daioncdn.net/a2tv/a2tv.m3u8?ce=3&app=59363a60-be96-4f73-9eff-355d0ff2c758\",\n+ \"AAE2E325-4EAE-45B7-B017-26FD7DDB6CE4\": \"https://trkvz.daioncdn.net/minikago/minikago.m3u8?app=web&ce=3\",\n+ \"01ED59F2-4067-4945-8204-45F6C6DB4045\": \"https://trkvz.daioncdn.net/minikago_cocuk/minikago_cocuk.m3u8?app=web&ce=3\",\n+ }\n+\n def _get_streams(self):\n- _find_and_get_attrs = validate.Schema(\n+ _find_and_get_attrs = validate.all(\n validate.xml_find(\".//div[@data-videoid][@data-websiteid]\"),\n validate.union_get(\"data-videoid\", \"data-websiteid\"),\n )\n@@ -68,8 +79,8 @@\n return\n \n video_id, website_id = id_data\n- log.debug(f\"video_id={video_id}\")\n- log.debug(f\"website_id={website_id}\")\n+ log.debug(f\"{video_id=}\")\n+ log.debug(f\"{website_id=}\")\n \n self.id, self.title, hls_url = self.session.http.get(\n f\"https://videojs.tmgrup.com.tr/getvideo/{website_id}/{video_id}\",\n@@ -87,11 +98,14 @@\n validate.union_get(\"VideoId\", \"Title\", \"VideoSmilUrl\"),\n ),\n )\n- log.debug(f\"hls_url={hls_url}\")\n+\n+ if video_id == self._VIDEOID_LIVE:\n+ hls_url = self._MAPPING_WEBSITEID_HLSURL.get(website_id.upper(), hls_url)\n+ log.debug(f\"{hls_url=}\")\n \n secure_hls_url = self.session.http.get(\n \"https://securevideotoken.tmgrup.com.tr/webtv/secure\",\n- params=f\"url={hls_url}\",\n+ params={\"url\": hls_url},\n headers={\"Referer\": self.url},\n schema=validate.Schema(\n validate.parse_json(),\n@@ -102,7 +116,7 @@\n validate.get(\"Url\"),\n ),\n )\n- log.debug(f\"secure_hls_url={secure_hls_url}\")\n+ log.debug(f\"{secure_hls_url=}\")\n \n if secure_hls_url:\n return HLSStream.parse_variant_playlist(self.session, secure_hls_url)\n", "issue": "plugins.turkuvaz: no data on minikacocuk.com.tr\n### Checklist\r\n\r\n- [X] This is a [plugin issue](https://streamlink.github.io/plugins.html) and not [a different kind of issue](https://github.com/streamlink/streamlink/issues/new/choose)\r\n- [X] [I have read the contribution guidelines](https://github.com/streamlink/streamlink/blob/master/CONTRIBUTING.md#contributing-to-streamlink)\r\n- [X] [I have checked the list of open and recently closed plugin issues](https://github.com/streamlink/streamlink/issues?q=is%3Aissue+label%3A%22plugin+issue%22)\r\n- [X] [I have checked the commit log of the master branch](https://github.com/streamlink/streamlink/commits/master)\r\n\r\n### Streamlink version\r\n\r\n6.7.2\r\n\r\n### Description\r\n\r\n### Debug log\r\n\r\n```text\r\nNot working Python 3!!!! 
\"Minikacocuk\" channel ,please help.\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\n$description Turkish live TV channels from Turkuvaz Media Group, including Ahaber, ATV, Minika COCUK and MinikaGO.\n$url a2tv.com.tr\n$url ahaber.com.tr\n$url anews.com.tr\n$url apara.com.tr\n$url aspor.com.tr\n$url atv.com.tr\n$url atvavrupa.tv\n$url minikacocuk.com.tr\n$url minikago.com.tr\n$url vavtv.com.tr\n$type live, vod\n$metadata id\n$metadata title\n$region various\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(r\"\"\"\n https?://(?:www\\.)?\n (?:\n atvavrupa\\.tv\n |\n (?:a2tv|ahaber|anews|apara|aspor|atv|minikacocuk|minikago|vavtv)\\.com\\.tr\n )\n\"\"\", re.VERBOSE))\nclass Turkuvaz(Plugin):\n def _get_streams(self):\n _find_and_get_attrs = validate.Schema(\n validate.xml_find(\".//div[@data-videoid][@data-websiteid]\"),\n validate.union_get(\"data-videoid\", \"data-websiteid\"),\n )\n\n id_data = self.session.http.get(\n self.url,\n schema=validate.Schema(\n validate.parse_html(),\n validate.any(\n _find_and_get_attrs,\n validate.all(\n validate.xml_xpath_string(\n \".//script[contains(text(),'data-videoid') and contains(text(),'data-websiteid')]/text()\",\n ),\n validate.none_or_all(\n str,\n validate.regex(re.compile(r\"\"\"var\\s+tmdPlayer\\s*=\\s*(?P<q>[\"'])(.*?)(?P=q)\"\"\")),\n validate.get(0),\n validate.parse_html(),\n _find_and_get_attrs,\n ),\n ),\n ),\n ),\n )\n\n if not id_data:\n return\n\n video_id, website_id = id_data\n log.debug(f\"video_id={video_id}\")\n log.debug(f\"website_id={website_id}\")\n\n self.id, self.title, hls_url = self.session.http.get(\n f\"https://videojs.tmgrup.com.tr/getvideo/{website_id}/{video_id}\",\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"success\": True,\n \"video\": {\n \"VideoId\": str,\n \"Title\": str,\n \"VideoSmilUrl\": validate.url(),\n },\n },\n validate.get(\"video\"),\n validate.union_get(\"VideoId\", \"Title\", \"VideoSmilUrl\"),\n ),\n )\n log.debug(f\"hls_url={hls_url}\")\n\n secure_hls_url = self.session.http.get(\n \"https://securevideotoken.tmgrup.com.tr/webtv/secure\",\n params=f\"url={hls_url}\",\n headers={\"Referer\": self.url},\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"Success\": True,\n \"Url\": validate.url(),\n },\n validate.get(\"Url\"),\n ),\n )\n log.debug(f\"secure_hls_url={secure_hls_url}\")\n\n if secure_hls_url:\n return HLSStream.parse_variant_playlist(self.session, secure_hls_url)\n\n\n__plugin__ = Turkuvaz\n", "path": "src/streamlink/plugins/turkuvaz.py"}], "after_files": [{"content": "\"\"\"\n$description Turkish live TV channels from Turkuvaz Media Group, including Ahaber, ATV, Minika COCUK and MinikaGO.\n$url a2tv.com.tr\n$url ahaber.com.tr\n$url anews.com.tr\n$url apara.com.tr\n$url aspor.com.tr\n$url atv.com.tr\n$url atvavrupa.tv\n$url minikacocuk.com.tr\n$url minikago.com.tr\n$url vavtv.com.tr\n$type live, vod\n$metadata id\n$metadata title\n$region various\n\"\"\"\n\nimport logging\nimport re\n\nfrom streamlink.plugin import Plugin, pluginmatcher\nfrom streamlink.plugin.api import validate\nfrom streamlink.stream.hls import HLSStream\n\n\nlog = logging.getLogger(__name__)\n\n\n@pluginmatcher(re.compile(r\"\"\"\n https?://(?:www\\.)?\n (?:\n atvavrupa\\.tv\n |\n (?:a2tv|ahaber|anews|apara|aspor|atv|minikacocuk|minikago|vavtv)\\.com\\.tr\n )\n\"\"\", re.VERBOSE))\nclass 
Turkuvaz(Plugin):\n _VIDEOID_LIVE = \"00000000-0000-0000-0000-000000000000\"\n\n # hardcoded in https://i.tmgrup.com.tr/videojs/js/tmdplayersetup.js?v=651\n # (via https://www.minikacocuk.com.tr/webtv/canli-yayin)\n _MAPPING_WEBSITEID_HLSURL = {\n \"9BBE055A-4CF6-4BC3-A675-D40E89B55B91\": \"https://trkvz.daioncdn.net/aspor/aspor.m3u8?ce=3&app=45f847c4-04e8-419a-a561-2ebf87084765\",\n \"0C1BC8FF-C3B1-45BE-A95B-F7BB9C8B03ED\": \"https://trkvz.daioncdn.net/a2tv/a2tv.m3u8?ce=3&app=59363a60-be96-4f73-9eff-355d0ff2c758\",\n \"AAE2E325-4EAE-45B7-B017-26FD7DDB6CE4\": \"https://trkvz.daioncdn.net/minikago/minikago.m3u8?app=web&ce=3\",\n \"01ED59F2-4067-4945-8204-45F6C6DB4045\": \"https://trkvz.daioncdn.net/minikago_cocuk/minikago_cocuk.m3u8?app=web&ce=3\",\n }\n\n def _get_streams(self):\n _find_and_get_attrs = validate.all(\n validate.xml_find(\".//div[@data-videoid][@data-websiteid]\"),\n validate.union_get(\"data-videoid\", \"data-websiteid\"),\n )\n\n id_data = self.session.http.get(\n self.url,\n schema=validate.Schema(\n validate.parse_html(),\n validate.any(\n _find_and_get_attrs,\n validate.all(\n validate.xml_xpath_string(\n \".//script[contains(text(),'data-videoid') and contains(text(),'data-websiteid')]/text()\",\n ),\n validate.none_or_all(\n str,\n validate.regex(re.compile(r\"\"\"var\\s+tmdPlayer\\s*=\\s*(?P<q>[\"'])(.*?)(?P=q)\"\"\")),\n validate.get(0),\n validate.parse_html(),\n _find_and_get_attrs,\n ),\n ),\n ),\n ),\n )\n\n if not id_data:\n return\n\n video_id, website_id = id_data\n log.debug(f\"{video_id=}\")\n log.debug(f\"{website_id=}\")\n\n self.id, self.title, hls_url = self.session.http.get(\n f\"https://videojs.tmgrup.com.tr/getvideo/{website_id}/{video_id}\",\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"success\": True,\n \"video\": {\n \"VideoId\": str,\n \"Title\": str,\n \"VideoSmilUrl\": validate.url(),\n },\n },\n validate.get(\"video\"),\n validate.union_get(\"VideoId\", \"Title\", \"VideoSmilUrl\"),\n ),\n )\n\n if video_id == self._VIDEOID_LIVE:\n hls_url = self._MAPPING_WEBSITEID_HLSURL.get(website_id.upper(), hls_url)\n log.debug(f\"{hls_url=}\")\n\n secure_hls_url = self.session.http.get(\n \"https://securevideotoken.tmgrup.com.tr/webtv/secure\",\n params={\"url\": hls_url},\n headers={\"Referer\": self.url},\n schema=validate.Schema(\n validate.parse_json(),\n {\n \"Success\": True,\n \"Url\": validate.url(),\n },\n validate.get(\"Url\"),\n ),\n )\n log.debug(f\"{secure_hls_url=}\")\n\n if secure_hls_url:\n return HLSStream.parse_variant_playlist(self.session, secure_hls_url)\n\n\n__plugin__ = Turkuvaz\n", "path": "src/streamlink/plugins/turkuvaz.py"}]} | 1,474 | 935 |
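
For readability: the golden diff in the record above gives the Turkuvaz plugin a hard-coded map from website IDs to live HLS URLs and switches the secure-token request from a preformatted query string (`params=f"url={hls_url}"`) to a params mapping. A minimal sketch of that second change, using plain `requests` rather than the plugin's own session object, with hypothetical URLs:

```python
import requests

hls_url = "https://example.invalid/stream.m3u8?app=web&ce=3"  # hypothetical stream URL

# Passing a dict lets the HTTP client URL-encode the value properly,
# instead of embedding it verbatim as a preformatted "url=..." string.
response = requests.get(
    "https://securevideotoken.tmgrup.com.tr/webtv/secure",
    params={"url": hls_url},
    headers={"Referer": "https://www.example.invalid/"},  # hypothetical referer
)
print(response.status_code)
```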
gh_patches_debug_20060 | rasdani/github-patches | git_diff | fail2ban__fail2ban-1144 | We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Fail2ban git fails to install from PKGBUILD on Arch Linux /var/run/ exists
Hi there.
I wrote (re-wrote / modified / plagiarized...) a PKGBUILD a while back:
http://pastebin.com/raw.php?i=5E4cpjNq
It used to work fine and sweet... but now:
[andrzejl@andrzejl fail2ban-git]$ makepkg -s -i ./
==> WARNING: Cannot find the sudo binary. Will use su to acquire root privileges.
==> Making package: fail2ban-git 0.9.2.r132.gc37009a-1 (Thu Jul 30 18:25:25 IST 2015)
==> Checking runtime dependencies...
==> Checking buildtime dependencies...
==> Retrieving sources...
-> Updating fail2ban-git git repo...
Fetching origin
==> Validating source files with sha512sums...
fail2ban-git ... Skipped
==> Extracting sources...
-> Creating working copy of fail2ban git repo...
Switched to a new branch 'makepkg'
==> Starting pkgver()...
==> WARNING: A package has already been built, installing existing package...
==> Installing package fail2ban-git with pacman -U...
Password:
loading packages...
resolving dependencies...
looking for conflicting packages...
Packages (1) fail2ban-git-0.9.2.r132.gc37009a-1
Total Installed Size: 1.87 MiB
Net Upgrade Size: 0.03 MiB
:: Proceed with installation? [Y/n]
(1/1) checking keys in keyring                                     [##########################################] 100%
(1/1) checking package integrity [##########################################] 100%
(1/1) loading package files [##########################################] 100%
(1/1) checking for file conflicts [##########################################] 100%
error: failed to commit transaction (conflicting files)
fail2ban-git: /var/run exists in filesystem
Errors occurred, no packages were upgraded.
==> WARNING: Failed to install built package(s).
[andrzejl@andrzejl fail2ban-git]$
The problem is that:
[root@andrzejl andrzejl]# ls --full /var/ | grep run
lrwxrwxrwx 1 root root 11 2015-02-15 21:58:46.000000000 +0000 lock -> ../run/lock
lrwxrwxrwx 1 root root 6 2015-02-15 21:58:46.000000000 +0000 run -> ../run
[root@andrzejl andrzejl]#
/var/run is a symlink pointing to /run.
Anyone knows how to bite this thing?
Cheers.
Andrzej
--- END ISSUE ---
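
The conflict the reporter describes comes down to `/var/run` being a symlink to `/run` on Arch, while the package metadata records the literal path `/var/run/fail2ban`. A quick way to see the resolution on such a system (a sketch; the printed values only hold where the symlink exists):

```python
import os
from os.path import realpath

# On an Arch-like layout where /var/run -> ../run, realpath collapses the
# symlink, so the packaged directory no longer collides with /var/run itself.
print(os.path.islink("/var/run"))        # True on systems with the symlink
print(realpath("/var/run/fail2ban"))     # e.g. /run/fail2ban
```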
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/python
2 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-
3 # vi: set ft=python sts=4 ts=4 sw=4 noet :
4
5 # This file is part of Fail2Ban.
6 #
7 # Fail2Ban is free software; you can redistribute it and/or modify
8 # it under the terms of the GNU General Public License as published by
9 # the Free Software Foundation; either version 2 of the License, or
10 # (at your option) any later version.
11 #
12 # Fail2Ban is distributed in the hope that it will be useful,
13 # but WITHOUT ANY WARRANTY; without even the implied warranty of
14 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 # GNU General Public License for more details.
16 #
17 # You should have received a copy of the GNU General Public License
18 # along with Fail2Ban; if not, write to the Free Software
19 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
20
21 __author__ = "Cyril Jaquier, Steven Hiscocks, Yaroslav Halchenko"
22 __copyright__ = "Copyright (c) 2004 Cyril Jaquier, 2008-2013 Fail2Ban Contributors"
23 __license__ = "GPL"
24
25 try:
26 import setuptools
27 from setuptools import setup
28 except ImportError:
29 setuptools = None
30 from distutils.core import setup
31
32 try:
33 # python 3.x
34 from distutils.command.build_py import build_py_2to3 as build_py
35 from distutils.command.build_scripts \
36 import build_scripts_2to3 as build_scripts
37 except ImportError:
38 # python 2.x
39 from distutils.command.build_py import build_py
40 from distutils.command.build_scripts import build_scripts
41 import os
42 from os.path import isfile, join, isdir
43 import sys
44 import warnings
45 from glob import glob
46
47 if setuptools and "test" in sys.argv:
48 import logging
49 logSys = logging.getLogger("fail2ban")
50 hdlr = logging.StreamHandler(sys.stdout)
51 fmt = logging.Formatter("%(asctime)-15s %(message)s")
52 hdlr.setFormatter(fmt)
53 logSys.addHandler(hdlr)
54 if set(["-q", "--quiet"]) & set(sys.argv):
55 logSys.setLevel(logging.CRITICAL)
56 warnings.simplefilter("ignore")
57 sys.warnoptions.append("ignore")
58 elif set(["-v", "--verbose"]) & set(sys.argv):
59 logSys.setLevel(logging.DEBUG)
60 else:
61 logSys.setLevel(logging.INFO)
62 elif "test" in sys.argv:
63 print("python distribute required to execute fail2ban tests")
64 print("")
65
66 longdesc = '''
67 Fail2Ban scans log files like /var/log/pwdfail or
68 /var/log/apache/error_log and bans IP that makes
69 too many password failures. It updates firewall rules
70 to reject the IP address or executes user defined
71 commands.'''
72
73 if setuptools:
74 setup_extra = {
75 'test_suite': "fail2ban.tests.utils.gatherTests",
76 'use_2to3': True,
77 }
78 else:
79 setup_extra = {}
80
81 data_files_extra = []
82 if os.path.exists('/var/run'):
83 # if we are on the system with /var/run -- we are to use it for having fail2ban/
84 # directory there for socket file etc
85 data_files_extra += [('/var/run/fail2ban', '')]
86
87 # Get version number, avoiding importing fail2ban.
88 # This is due to tests not functioning for python3 as 2to3 takes place later
89 exec(open(join("fail2ban", "version.py")).read())
90
91 setup(
92 name = "fail2ban",
93 version = version,
94 description = "Ban IPs that make too many password failures",
95 long_description = longdesc,
96 author = "Cyril Jaquier & Fail2Ban Contributors",
97 author_email = "[email protected]",
98 url = "http://www.fail2ban.org",
99 license = "GPL",
100 platforms = "Posix",
101 cmdclass = {'build_py': build_py, 'build_scripts': build_scripts},
102 scripts = [
103 'bin/fail2ban-client',
104 'bin/fail2ban-server',
105 'bin/fail2ban-regex',
106 'bin/fail2ban-testcases',
107 ],
108 packages = [
109 'fail2ban',
110 'fail2ban.client',
111 'fail2ban.server',
112 'fail2ban.tests',
113 'fail2ban.tests.action_d',
114 ],
115 package_data = {
116 'fail2ban.tests':
117 [ join(w[0], f).replace("fail2ban/tests/", "", 1)
118 for w in os.walk('fail2ban/tests/files')
119 for f in w[2]] +
120 [ join(w[0], f).replace("fail2ban/tests/", "", 1)
121 for w in os.walk('fail2ban/tests/config')
122 for f in w[2]] +
123 [ join(w[0], f).replace("fail2ban/tests/", "", 1)
124 for w in os.walk('fail2ban/tests/action_d')
125 for f in w[2]]
126 },
127 data_files = [
128 ('/etc/fail2ban',
129 glob("config/*.conf")
130 ),
131 ('/etc/fail2ban/filter.d',
132 glob("config/filter.d/*.conf")
133 ),
134 ('/etc/fail2ban/filter.d/ignorecommands',
135 glob("config/filter.d/ignorecommands/*")
136 ),
137 ('/etc/fail2ban/action.d',
138 glob("config/action.d/*.conf") +
139 glob("config/action.d/*.py")
140 ),
141 ('/etc/fail2ban/fail2ban.d',
142 ''
143 ),
144 ('/etc/fail2ban/jail.d',
145 ''
146 ),
147 ('/var/lib/fail2ban',
148 ''
149 ),
150 ('/usr/share/doc/fail2ban',
151 ['README.md', 'README.Solaris', 'DEVELOP', 'FILTERS',
152 'doc/run-rootless.txt']
153 )
154 ] + data_files_extra,
155 **setup_extra
156 )
157
158 # Do some checks after installation
159 # Search for obsolete files.
160 obsoleteFiles = []
161 elements = {
162 "/etc/":
163 [
164 "fail2ban.conf"
165 ],
166 "/usr/bin/":
167 [
168 "fail2ban.py"
169 ],
170 "/usr/lib/fail2ban/":
171 [
172 "version.py",
173 "protocol.py"
174 ]
175 }
176
177 for directory in elements:
178 for f in elements[directory]:
179 path = join(directory, f)
180 if isfile(path):
181 obsoleteFiles.append(path)
182
183 if obsoleteFiles:
184 print("")
185 print("Obsolete files from previous Fail2Ban versions were found on "
186 "your system.")
187 print("Please delete them:")
188 print("")
189 for f in obsoleteFiles:
190 print("\t" + f)
191 print("")
192
193 if isdir("/usr/lib/fail2ban"):
194 print("")
195 print("Fail2ban is not installed under /usr/lib anymore. The new "
196 "location is under /usr/share. Please remove the directory "
197 "/usr/lib/fail2ban and everything under this directory.")
198 print("")
199
200 # Update config file
201 if sys.argv[1] == "install":
202 print("")
203 print("Please do not forget to update your configuration files.")
204 print("They are in /etc/fail2ban/.")
205 print("")
206
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
| diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -39,7 +39,7 @@
from distutils.command.build_py import build_py
from distutils.command.build_scripts import build_scripts
import os
-from os.path import isfile, join, isdir
+from os.path import isfile, join, isdir, realpath
import sys
import warnings
from glob import glob
@@ -81,8 +81,9 @@
data_files_extra = []
if os.path.exists('/var/run'):
# if we are on the system with /var/run -- we are to use it for having fail2ban/
- # directory there for socket file etc
- data_files_extra += [('/var/run/fail2ban', '')]
+ # directory there for socket file etc.
+ # realpath is used to possibly resolve /var/run -> /run symlink
+ data_files_extra += [(realpath('/var/run/fail2ban'), '')]
# Get version number, avoiding importing fail2ban.
# This is due to tests not functioning for python3 as 2to3 takes place later
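
Applied to the file above, the patched branch registers the resolved directory instead of the literal symlinked path. The relevant logic can be checked in isolation with this sketch, which mirrors the patched `setup.py` fragment:

```python
import os
from os.path import realpath

data_files_extra = []
if os.path.exists('/var/run'):
    # realpath resolves a possible /var/run -> /run symlink, so the package
    # no longer claims ownership of the /var/run link itself.
    data_files_extra += [(realpath('/var/run/fail2ban'), '')]

print(data_files_extra)  # e.g. [('/run/fail2ban', '')] where the symlink exists
```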
| {"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -39,7 +39,7 @@\n \tfrom distutils.command.build_py import build_py\n \tfrom distutils.command.build_scripts import build_scripts\n import os\n-from os.path import isfile, join, isdir\n+from os.path import isfile, join, isdir, realpath\n import sys\n import warnings\n from glob import glob\n@@ -81,8 +81,9 @@\n data_files_extra = []\n if os.path.exists('/var/run'):\n \t# if we are on the system with /var/run -- we are to use it for having fail2ban/\n-\t# directory there for socket file etc\n-\tdata_files_extra += [('/var/run/fail2ban', '')]\n+\t# directory there for socket file etc.\n+\t# realpath is used to possibly resolve /var/run -> /run symlink\n+\tdata_files_extra += [(realpath('/var/run/fail2ban'), '')]\n \n # Get version number, avoiding importing fail2ban.\n # This is due to tests not functioning for python3 as 2to3 takes place later\n", "issue": "Fail2ban git fails to install from PKGBUILD on Arch Linux /var/run/ exists\nHi there.\n\nI wrote (re-wrote / modified / plagiarized...) PKGBUILD a while back:\n\nhttp://pastebin.com/raw.php?i=5E4cpjNq\n\nIt used to work fine and sweet... but now:\n\n[andrzejl@andrzejl fail2ban-git]$ makepkg -s -i ./\n==> WARNING: Cannot find the sudo binary. Will use su to acquire root privileges.\n==> Making package: fail2ban-git 0.9.2.r132.gc37009a-1 (Thu Jul 30 18:25:25 IST 2015)\n==> Checking runtime dependencies...\n==> Checking buildtime dependencies...\n==> Retrieving sources...\n -> Updating fail2ban-git git repo...\nFetching origin\n==> Validating source files with sha512sums...\n fail2ban-git ... Skipped\n==> Extracting sources...\n -> Creating working copy of fail2ban git repo...\nSwitched to a new branch 'makepkg'\n==> Starting pkgver()...\n==> WARNING: A package has already been built, installing existing package...\n==> Installing package fail2ban-git with pacman -U...\nPassword:\nloading packages...\nresolving dependencies...\nlooking for conflicting packages...\n\nPackages (1) fail2ban-git-0.9.2.r132.gc37009a-1\n\nTotal Installed Size: 1.87 MiB\nNet Upgrade Size: 0.03 MiB\n\n:: Proceed with installation? 
[Y/n](1/1) checking keys in keyring [##########################################] 100%\n(1/1) checking package integrity [##########################################] 100%\n(1/1) loading package files [##########################################] 100%\n(1/1) checking for file conflicts [##########################################] 100%\nerror: failed to commit transaction (conflicting files)\nfail2ban-git: /var/run exists in filesystem\nErrors occurred, no packages were upgraded.\n==> WARNING: Failed to install built package(s).\n[andrzejl@andrzejl fail2ban-git]$ \n\nThe problem is that:\n\n[root@andrzejl andrzejl]# ls --full /var/ | grep run\nlrwxrwxrwx 1 root root 11 2015-02-15 21:58:46.000000000 +0000 lock -> ../run/lock\nlrwxrwxrwx 1 root root 6 2015-02-15 21:58:46.000000000 +0000 run -> ../run\n[root@andrzejl andrzejl]# \n\n/var/run is a symlink pointing to /run.\n\nAnyone knows how to bite this thing?\n\nCheers.\n\nAndrzej\n\n", "before_files": [{"content": "#!/usr/bin/python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-\n# vi: set ft=python sts=4 ts=4 sw=4 noet :\n\n# This file is part of Fail2Ban.\n#\n# Fail2Ban is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# Fail2Ban is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Fail2Ban; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n__author__ = \"Cyril Jaquier, Steven Hiscocks, Yaroslav Halchenko\"\n__copyright__ = \"Copyright (c) 2004 Cyril Jaquier, 2008-2013 Fail2Ban Contributors\"\n__license__ = \"GPL\"\n\ntry:\n\timport setuptools\n\tfrom setuptools import setup\nexcept ImportError:\n\tsetuptools = None\n\tfrom distutils.core import setup\n\ntry:\n\t# python 3.x\n\tfrom distutils.command.build_py import build_py_2to3 as build_py\n\tfrom distutils.command.build_scripts \\\n\t\timport build_scripts_2to3 as build_scripts\nexcept ImportError:\n\t# python 2.x\n\tfrom distutils.command.build_py import build_py\n\tfrom distutils.command.build_scripts import build_scripts\nimport os\nfrom os.path import isfile, join, isdir\nimport sys\nimport warnings\nfrom glob import glob\n\nif setuptools and \"test\" in sys.argv:\n\timport logging\n\tlogSys = logging.getLogger(\"fail2ban\")\n\thdlr = logging.StreamHandler(sys.stdout)\n\tfmt = logging.Formatter(\"%(asctime)-15s %(message)s\")\n\thdlr.setFormatter(fmt)\n\tlogSys.addHandler(hdlr)\n\tif set([\"-q\", \"--quiet\"]) & set(sys.argv):\n\t\tlogSys.setLevel(logging.CRITICAL)\n\t\twarnings.simplefilter(\"ignore\")\n\t\tsys.warnoptions.append(\"ignore\")\n\telif set([\"-v\", \"--verbose\"]) & set(sys.argv):\n\t\tlogSys.setLevel(logging.DEBUG)\n\telse:\n\t\tlogSys.setLevel(logging.INFO)\nelif \"test\" in sys.argv:\n\tprint(\"python distribute required to execute fail2ban tests\")\n\tprint(\"\")\n\nlongdesc = '''\nFail2Ban scans log files like /var/log/pwdfail or\n/var/log/apache/error_log and bans IP that makes\ntoo many password failures. 
It updates firewall rules\nto reject the IP address or executes user defined\ncommands.'''\n\nif setuptools:\n\tsetup_extra = {\n\t\t'test_suite': \"fail2ban.tests.utils.gatherTests\",\n\t\t'use_2to3': True,\n\t}\nelse:\n\tsetup_extra = {}\n\ndata_files_extra = []\nif os.path.exists('/var/run'):\n\t# if we are on the system with /var/run -- we are to use it for having fail2ban/\n\t# directory there for socket file etc\n\tdata_files_extra += [('/var/run/fail2ban', '')]\n\n# Get version number, avoiding importing fail2ban.\n# This is due to tests not functioning for python3 as 2to3 takes place later\nexec(open(join(\"fail2ban\", \"version.py\")).read())\n\nsetup(\n\tname = \"fail2ban\",\n\tversion = version,\n\tdescription = \"Ban IPs that make too many password failures\",\n\tlong_description = longdesc,\n\tauthor = \"Cyril Jaquier & Fail2Ban Contributors\",\n\tauthor_email = \"[email protected]\",\n\turl = \"http://www.fail2ban.org\",\n\tlicense = \"GPL\",\n\tplatforms = \"Posix\",\n\tcmdclass = {'build_py': build_py, 'build_scripts': build_scripts},\n\tscripts = [\n\t\t'bin/fail2ban-client',\n\t\t'bin/fail2ban-server',\n\t\t'bin/fail2ban-regex',\n\t\t'bin/fail2ban-testcases',\n\t],\n\tpackages = [\n\t\t'fail2ban',\n\t\t'fail2ban.client',\n\t\t'fail2ban.server',\n\t\t'fail2ban.tests',\n\t\t'fail2ban.tests.action_d',\n\t],\n\tpackage_data = {\n\t\t'fail2ban.tests':\n\t\t\t[ join(w[0], f).replace(\"fail2ban/tests/\", \"\", 1)\n\t\t\t\tfor w in os.walk('fail2ban/tests/files')\n\t\t\t\tfor f in w[2]] +\n\t\t\t[ join(w[0], f).replace(\"fail2ban/tests/\", \"\", 1)\n\t\t\t\tfor w in os.walk('fail2ban/tests/config')\n\t\t\t\tfor f in w[2]] +\n\t\t\t[ join(w[0], f).replace(\"fail2ban/tests/\", \"\", 1)\n\t\t\t\tfor w in os.walk('fail2ban/tests/action_d')\n\t\t\t\tfor f in w[2]]\n\t},\n\tdata_files = [\n\t\t('/etc/fail2ban',\n\t\t\tglob(\"config/*.conf\")\n\t\t),\n\t\t('/etc/fail2ban/filter.d',\n\t\t\tglob(\"config/filter.d/*.conf\")\n\t\t),\n\t\t('/etc/fail2ban/filter.d/ignorecommands',\n\t\t\tglob(\"config/filter.d/ignorecommands/*\")\n\t\t),\n\t\t('/etc/fail2ban/action.d',\n\t\t\tglob(\"config/action.d/*.conf\") +\n\t\t\tglob(\"config/action.d/*.py\")\n\t\t),\n\t\t('/etc/fail2ban/fail2ban.d',\n\t\t\t''\n\t\t),\n\t\t('/etc/fail2ban/jail.d',\n\t\t\t''\n\t\t),\n\t\t('/var/lib/fail2ban',\n\t\t\t''\n\t\t),\n\t\t('/usr/share/doc/fail2ban',\n\t\t\t['README.md', 'README.Solaris', 'DEVELOP', 'FILTERS',\n\t\t\t 'doc/run-rootless.txt']\n\t\t)\n\t] + data_files_extra,\n\t**setup_extra\n)\n\n# Do some checks after installation\n# Search for obsolete files.\nobsoleteFiles = []\nelements = {\n\t\"/etc/\":\n\t\t[\n\t\t\t\"fail2ban.conf\"\n\t\t],\n\t\"/usr/bin/\":\n\t\t[\n\t\t\t\"fail2ban.py\"\n\t\t],\n\t\"/usr/lib/fail2ban/\":\n\t\t[\n\t\t\t\"version.py\",\n\t\t\t\"protocol.py\"\n\t\t]\n}\n\nfor directory in elements:\n\tfor f in elements[directory]:\n\t\tpath = join(directory, f)\n\t\tif isfile(path):\n\t\t\tobsoleteFiles.append(path)\n\nif obsoleteFiles:\n\tprint(\"\")\n\tprint(\"Obsolete files from previous Fail2Ban versions were found on \"\n\t\t \"your system.\")\n\tprint(\"Please delete them:\")\n\tprint(\"\")\n\tfor f in obsoleteFiles:\n\t\tprint(\"\\t\" + f)\n\tprint(\"\")\n\nif isdir(\"/usr/lib/fail2ban\"):\n\tprint(\"\")\n\tprint(\"Fail2ban is not installed under /usr/lib anymore. The new \"\n\t\t \"location is under /usr/share. 
Please remove the directory \"\n\t\t \"/usr/lib/fail2ban and everything under this directory.\")\n\tprint(\"\")\n\n# Update config file\nif sys.argv[1] == \"install\":\n\tprint(\"\")\n\tprint(\"Please do not forget to update your configuration files.\")\n\tprint(\"They are in /etc/fail2ban/.\")\n\tprint(\"\")\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/python\n# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: t -*-\n# vi: set ft=python sts=4 ts=4 sw=4 noet :\n\n# This file is part of Fail2Ban.\n#\n# Fail2Ban is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# Fail2Ban is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Fail2Ban; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\n__author__ = \"Cyril Jaquier, Steven Hiscocks, Yaroslav Halchenko\"\n__copyright__ = \"Copyright (c) 2004 Cyril Jaquier, 2008-2013 Fail2Ban Contributors\"\n__license__ = \"GPL\"\n\ntry:\n\timport setuptools\n\tfrom setuptools import setup\nexcept ImportError:\n\tsetuptools = None\n\tfrom distutils.core import setup\n\ntry:\n\t# python 3.x\n\tfrom distutils.command.build_py import build_py_2to3 as build_py\n\tfrom distutils.command.build_scripts \\\n\t\timport build_scripts_2to3 as build_scripts\nexcept ImportError:\n\t# python 2.x\n\tfrom distutils.command.build_py import build_py\n\tfrom distutils.command.build_scripts import build_scripts\nimport os\nfrom os.path import isfile, join, isdir, realpath\nimport sys\nimport warnings\nfrom glob import glob\n\nif setuptools and \"test\" in sys.argv:\n\timport logging\n\tlogSys = logging.getLogger(\"fail2ban\")\n\thdlr = logging.StreamHandler(sys.stdout)\n\tfmt = logging.Formatter(\"%(asctime)-15s %(message)s\")\n\thdlr.setFormatter(fmt)\n\tlogSys.addHandler(hdlr)\n\tif set([\"-q\", \"--quiet\"]) & set(sys.argv):\n\t\tlogSys.setLevel(logging.CRITICAL)\n\t\twarnings.simplefilter(\"ignore\")\n\t\tsys.warnoptions.append(\"ignore\")\n\telif set([\"-v\", \"--verbose\"]) & set(sys.argv):\n\t\tlogSys.setLevel(logging.DEBUG)\n\telse:\n\t\tlogSys.setLevel(logging.INFO)\nelif \"test\" in sys.argv:\n\tprint(\"python distribute required to execute fail2ban tests\")\n\tprint(\"\")\n\nlongdesc = '''\nFail2Ban scans log files like /var/log/pwdfail or\n/var/log/apache/error_log and bans IP that makes\ntoo many password failures. 
It updates firewall rules\nto reject the IP address or executes user defined\ncommands.'''\n\nif setuptools:\n\tsetup_extra = {\n\t\t'test_suite': \"fail2ban.tests.utils.gatherTests\",\n\t\t'use_2to3': True,\n\t}\nelse:\n\tsetup_extra = {}\n\ndata_files_extra = []\nif os.path.exists('/var/run'):\n\t# if we are on the system with /var/run -- we are to use it for having fail2ban/\n\t# directory there for socket file etc.\n\t# realpath is used to possibly resolve /var/run -> /run symlink\n\tdata_files_extra += [(realpath('/var/run/fail2ban'), '')]\n\n# Get version number, avoiding importing fail2ban.\n# This is due to tests not functioning for python3 as 2to3 takes place later\nexec(open(join(\"fail2ban\", \"version.py\")).read())\n\nsetup(\n\tname = \"fail2ban\",\n\tversion = version,\n\tdescription = \"Ban IPs that make too many password failures\",\n\tlong_description = longdesc,\n\tauthor = \"Cyril Jaquier & Fail2Ban Contributors\",\n\tauthor_email = \"[email protected]\",\n\turl = \"http://www.fail2ban.org\",\n\tlicense = \"GPL\",\n\tplatforms = \"Posix\",\n\tcmdclass = {'build_py': build_py, 'build_scripts': build_scripts},\n\tscripts = [\n\t\t'bin/fail2ban-client',\n\t\t'bin/fail2ban-server',\n\t\t'bin/fail2ban-regex',\n\t\t'bin/fail2ban-testcases',\n\t],\n\tpackages = [\n\t\t'fail2ban',\n\t\t'fail2ban.client',\n\t\t'fail2ban.server',\n\t\t'fail2ban.tests',\n\t\t'fail2ban.tests.action_d',\n\t],\n\tpackage_data = {\n\t\t'fail2ban.tests':\n\t\t\t[ join(w[0], f).replace(\"fail2ban/tests/\", \"\", 1)\n\t\t\t\tfor w in os.walk('fail2ban/tests/files')\n\t\t\t\tfor f in w[2]] +\n\t\t\t[ join(w[0], f).replace(\"fail2ban/tests/\", \"\", 1)\n\t\t\t\tfor w in os.walk('fail2ban/tests/config')\n\t\t\t\tfor f in w[2]] +\n\t\t\t[ join(w[0], f).replace(\"fail2ban/tests/\", \"\", 1)\n\t\t\t\tfor w in os.walk('fail2ban/tests/action_d')\n\t\t\t\tfor f in w[2]]\n\t},\n\tdata_files = [\n\t\t('/etc/fail2ban',\n\t\t\tglob(\"config/*.conf\")\n\t\t),\n\t\t('/etc/fail2ban/filter.d',\n\t\t\tglob(\"config/filter.d/*.conf\")\n\t\t),\n\t\t('/etc/fail2ban/filter.d/ignorecommands',\n\t\t\tglob(\"config/filter.d/ignorecommands/*\")\n\t\t),\n\t\t('/etc/fail2ban/action.d',\n\t\t\tglob(\"config/action.d/*.conf\") +\n\t\t\tglob(\"config/action.d/*.py\")\n\t\t),\n\t\t('/etc/fail2ban/fail2ban.d',\n\t\t\t''\n\t\t),\n\t\t('/etc/fail2ban/jail.d',\n\t\t\t''\n\t\t),\n\t\t('/var/lib/fail2ban',\n\t\t\t''\n\t\t),\n\t\t('/usr/share/doc/fail2ban',\n\t\t\t['README.md', 'README.Solaris', 'DEVELOP', 'FILTERS',\n\t\t\t 'doc/run-rootless.txt']\n\t\t)\n\t] + data_files_extra,\n\t**setup_extra\n)\n\n# Do some checks after installation\n# Search for obsolete files.\nobsoleteFiles = []\nelements = {\n\t\"/etc/\":\n\t\t[\n\t\t\t\"fail2ban.conf\"\n\t\t],\n\t\"/usr/bin/\":\n\t\t[\n\t\t\t\"fail2ban.py\"\n\t\t],\n\t\"/usr/lib/fail2ban/\":\n\t\t[\n\t\t\t\"version.py\",\n\t\t\t\"protocol.py\"\n\t\t]\n}\n\nfor directory in elements:\n\tfor f in elements[directory]:\n\t\tpath = join(directory, f)\n\t\tif isfile(path):\n\t\t\tobsoleteFiles.append(path)\n\nif obsoleteFiles:\n\tprint(\"\")\n\tprint(\"Obsolete files from previous Fail2Ban versions were found on \"\n\t\t \"your system.\")\n\tprint(\"Please delete them:\")\n\tprint(\"\")\n\tfor f in obsoleteFiles:\n\t\tprint(\"\\t\" + f)\n\tprint(\"\")\n\nif isdir(\"/usr/lib/fail2ban\"):\n\tprint(\"\")\n\tprint(\"Fail2ban is not installed under /usr/lib anymore. The new \"\n\t\t \"location is under /usr/share. 
Please remove the directory \"\n\t\t \"/usr/lib/fail2ban and everything under this directory.\")\n\tprint(\"\")\n\n# Update config file\nif sys.argv[1] == \"install\":\n\tprint(\"\")\n\tprint(\"Please do not forget to update your configuration files.\")\n\tprint(\"They are in /etc/fail2ban/.\")\n\tprint(\"\")\n", "path": "setup.py"}]} | 3,105 | 248 |