problem_id (stringlengths 18-22) | source (stringclasses 1 value) | task_type (stringclasses 1 value) | in_source_id (stringlengths 13-58) | prompt (stringlengths 1.1k-25.4k) | golden_diff (stringlengths 145-5.13k) | verification_info (stringlengths 582-39.1k) | num_tokens (int64 271-4.1k) | num_tokens_diff (int64 47-1.02k) |
---|---|---|---|---|---|---|---|---|
gh_patches_debug_40467 | rasdani/github-patches | git_diff | Parsl__parsl-1292 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Tasks per app graph appears as a sawtooth, not as rectangles
See attached plot.
This looks like it plots the number of data points at the point a task starts, and then the next point after a task ends, with linear interpolation between the two points. This is an incorrect visualisation: a task does not fade from existing to not existing over the entire duration of execution; instead it exists at full strength for the full duration of existence, and should be represented on the graph as a rectangular, not saw tooth, plot.

--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsl/monitoring/visualization/plots/default/workflow_plots.py`
Content:
```
1 import numpy as np
2 import plotly.graph_objs as go
3 import plotly.figure_factory as ff
4 from plotly.offline import plot
5 import networkx as nx
6 import datetime
7
8 from parsl.monitoring.visualization.utils import timestamp_to_int, num_to_timestamp, DB_DATE_FORMAT
9
10
11 def task_gantt_plot(df_task, time_completed=None):
12
13 df_task = df_task.sort_values(by=['task_time_submitted'], ascending=False)
14
15 # df_task['task_time_submitted'] = pd.to_datetime(df_task['task_time_submitted'], unit='s')
16 # df_task['task_time_returned'] = pd.to_datetime(df_task['task_time_returned'], unit='s')
17
18 # df_task = df_task.rename(index=str, columns={"task_id": "Task",
19 # "task_time_submitted": "Start",
20 # "task_time_returned": "Finish",
21 # })
22 # parsl_tasks = df_task.to_dict('records')
23 parsl_tasks = []
24 for i, task in df_task.iterrows():
25 time_running, time_returned = task['task_time_running'], task['task_time_returned']
26 if task['task_time_returned'] is None:
27 time_returned = datetime.datetime.now()
28 if time_completed is not None:
29 time_returned = time_completed
30 if task['task_time_running'] is None:
31 time_running = task['task_time_submitted']
32 description = "Task ID: {}, app: {}".format(task['task_id'], task['task_func_name'])
33 dic1 = dict(Task=description, Start=task['task_time_submitted'],
34 Finish=time_running, Resource="Pending")
35 dic2 = dict(Task=description, Start=time_running,
36 Finish=time_returned, Resource="Running")
37 parsl_tasks.extend([dic1, dic2])
38 colors = {'Pending': 'rgb(168, 168, 168)', 'Running': 'rgb(0, 0, 255)'}
39 fig = ff.create_gantt(parsl_tasks,
40 title="",
41 colors=colors,
42 group_tasks=True,
43 show_colorbar=True,
44 index_col='Resource',
45 )
46 fig['layout']['yaxis']['title'] = 'Task'
47 fig['layout']['yaxis']['showticklabels'] = False
48 fig['layout']['xaxis']['title'] = 'Time'
49 return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
50
51
52 def task_per_app_plot(df_task, df_status):
53
54 def y_axis_setup(array):
55 count = 0
56 items = []
57 for n in array:
58 if n:
59 count += 1
60 elif count > 0:
61 count -= 1
62 items.append(count)
63 return items
64
65 # Fill up dict "apps" like: {app1: [#task1, #task2], app2: [#task4], app3: [#task3]}
66 apps_dict = dict()
67 for i in range(len(df_task)):
68 row = df_task.iloc[i]
69 if row['task_func_name'] in apps_dict:
70 apps_dict[row['task_func_name']].append(row['task_id'])
71 else:
72 apps_dict[row['task_func_name']] = [row['task_id']]
73
74 fig = go.Figure(
75 data=[go.Scatter(x=df_status[df_status['task_id'].isin(tasks)]['timestamp'],
76 y=y_axis_setup(df_status[df_status['task_id'].isin(
77 tasks)]['task_status_name'] == 'running'),
78 name=app)
79 for app, tasks in apps_dict.items()] +
80 [go.Scatter(x=df_status['timestamp'],
81 y=y_axis_setup(
82 df_status['task_status_name'] == 'running'),
83 name='all')],
84 layout=go.Layout(xaxis=dict(tickformat='%m-%d\n%H:%M:%S',
85 autorange=True,
86 title='Time'),
87 yaxis=dict(tickformat=',d',
88 title='Tasks'),
89 hovermode='closest',
90 title='Tasks per app'))
91
92 return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
93
94
95 def total_tasks_plot(df_task, df_status, columns=20):
96
97 min_time = timestamp_to_int(min(df_status['timestamp']))
98 max_time = timestamp_to_int(max(df_status['timestamp']))
99 time_step = (max_time - min_time) / columns
100
101 x_axis = []
102 for i in np.arange(min_time, max_time + time_step, time_step):
103 x_axis.append(num_to_timestamp(i).strftime(DB_DATE_FORMAT))
104
105 # Fill up dict "apps" like: {app1: [#task1, #task2], app2: [#task4], app3: [#task3]}
106 apps_dict = dict()
107 for i in range(len(df_task)):
108 row = df_task.iloc[i]
109 if row['task_func_name'] in apps_dict:
110 apps_dict[row['task_func_name']].append(row['task_id'])
111 else:
112 apps_dict[row['task_func_name']] = [row['task_id']]
113
114 def y_axis_setup(value):
115 items = []
116 for app, tasks in apps_dict.items():
117 tmp = []
118 task = df_status[df_status['task_id'].isin(tasks)]
119 for i in range(len(x_axis) - 1):
120 x = task['timestamp'] >= x_axis[i]
121 y = task['timestamp'] < x_axis[i + 1]
122 tmp.append(sum(task.loc[x & y]['task_status_name'] == value))
123 items = np.sum([items, tmp], axis=0)
124
125 return items
126
127 y_axis_done = y_axis_setup('done')
128 y_axis_failed = y_axis_setup('failed')
129
130 fig = go.Figure(data=[go.Bar(x=x_axis[:-1],
131 y=y_axis_done,
132 name='done'),
133 go.Bar(x=x_axis[:-1],
134 y=y_axis_failed,
135 name='failed')],
136 layout=go.Layout(xaxis=dict(tickformat='%m-%d\n%H:%M:%S',
137 autorange=True,
138 title='Time'),
139 yaxis=dict(tickformat=',d',
140 title='Running tasks.' ' Bin width: ' + num_to_timestamp(time_step).strftime('%Mm%Ss')),
141 annotations=[
142 dict(
143 x=0,
144 y=1.07,
145 showarrow=False,
146 text='Total Done: ' +
147 str(sum(y_axis_done)),
148 xref='paper',
149 yref='paper'
150 ),
151 dict(
152 x=0,
153 y=1.05,
154 showarrow=False,
155 text='Total Failed: ' +
156 str(sum(y_axis_failed)),
157 xref='paper',
158 yref='paper'
159 ),
160 ],
161 barmode='stack',
162 title="Total tasks"))
163
164 return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
165
166
167 def workflow_dag_plot(df_tasks, group_by_apps=True):
168 G = nx.DiGraph(directed=True)
169 nodes = df_tasks['task_id'].unique()
170 dic = df_tasks.set_index('task_id').to_dict()
171 G.add_nodes_from(nodes)
172
173 # Add edges or links between the nodes:
174 edges = []
175 for k, v in dic['task_depends'].items():
176 if v:
177 adj = v.split(",")
178 for e in adj:
179 edges.append((int(e), k))
180 G.add_edges_from(edges)
181
182 node_positions = nx.nx_pydot.pydot_layout(G, prog='dot')
183 node_traces = []
184
185 if group_by_apps:
186 groups_list = {app: i for i, app in enumerate(
187 df_tasks['task_func_name'].unique())}
188 else:
189 groups_list = {'Pending': (0, 'gray'), "Running": (1, 'blue'), 'Completed': (2, 'green')}
190
191 for k, _ in groups_list.items():
192 node_trace = go.Scatter(
193 x=[],
194 y=[],
195 text=[],
196 mode='markers',
197 textposition='top center',
198 textfont=dict(
199 family='arial',
200 size=18,
201 color='rgb(0,0,0)'
202 ),
203 hoverinfo='text',
204 name=k, # legend app_name here
205 marker=dict(
206 showscale=False,
207 # color='rgb(200,0,0)',
208 size=11,
209 line=dict(width=1, color='rgb(0,0,0)')))
210 node_traces.append(node_trace)
211
212 for node in node_positions:
213 x, y = node_positions[node]
214 if group_by_apps:
215 name = dic['task_func_name'][node]
216 index = groups_list[name]
217 else:
218 if dic['task_time_returned'][node] is not None:
219 name = 'Completed'
220 elif dic['task_time_running'][node] is not None:
221 name = "Running"
222 elif dic['task_time_submitted'][node] is not None:
223 name = "Pending"
224 index, color = groups_list[name]
225 node_traces[index]['marker']['color'] = color
226 node_traces[index]['x'] += tuple([x])
227 node_traces[index]['y'] += tuple([y])
228 node_traces[index]['text'] += tuple(
229 ["{}:{}".format(dic['task_func_name'][node], node)])
230
231 # The edges will be drawn as lines:
232 edge_trace = go.Scatter(
233 x=[],
234 y=[],
235 line=dict(width=1, color='rgb(160,160,160)'),
236 hoverinfo='none',
237 # showlegend=False,
238 name='Dependency',
239 mode='lines')
240
241 for edge in G.edges:
242 x0, y0 = node_positions[edge[0]]
243 x1, y1 = node_positions[edge[1]]
244 edge_trace['x'] += tuple([x0, x1, None])
245 edge_trace['y'] += tuple([y0, y1, None])
246
247 # Create figure:
248 fig = go.Figure(data=[edge_trace] + node_traces,
249 layout=go.Layout(
250 title='Workflow DAG',
251 titlefont=dict(size=16),
252 showlegend=True,
253 hovermode='closest',
254 margin=dict(b=20, l=5, r=5, t=40), # noqa: E741
255 xaxis=dict(showgrid=False, zeroline=False,
256 showticklabels=False),
257 yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)))
258 return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
259
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsl/monitoring/visualization/plots/default/workflow_plots.py b/parsl/monitoring/visualization/plots/default/workflow_plots.py
--- a/parsl/monitoring/visualization/plots/default/workflow_plots.py
+++ b/parsl/monitoring/visualization/plots/default/workflow_plots.py
@@ -1,4 +1,5 @@
import numpy as np
+import pandas as pd
import plotly.graph_objs as go
import plotly.figure_factory as ff
from plotly.offline import plot
@@ -49,47 +50,39 @@
return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
-def task_per_app_plot(df_task, df_status):
-
- def y_axis_setup(array):
- count = 0
- items = []
- for n in array:
- if n:
- count += 1
- elif count > 0:
- count -= 1
- items.append(count)
- return items
-
- # Fill up dict "apps" like: {app1: [#task1, #task2], app2: [#task4], app3: [#task3]}
- apps_dict = dict()
- for i in range(len(df_task)):
- row = df_task.iloc[i]
- if row['task_func_name'] in apps_dict:
- apps_dict[row['task_func_name']].append(row['task_id'])
- else:
- apps_dict[row['task_func_name']] = [row['task_id']]
-
- fig = go.Figure(
- data=[go.Scatter(x=df_status[df_status['task_id'].isin(tasks)]['timestamp'],
- y=y_axis_setup(df_status[df_status['task_id'].isin(
- tasks)]['task_status_name'] == 'running'),
- name=app)
- for app, tasks in apps_dict.items()] +
- [go.Scatter(x=df_status['timestamp'],
- y=y_axis_setup(
- df_status['task_status_name'] == 'running'),
- name='all')],
- layout=go.Layout(xaxis=dict(tickformat='%m-%d\n%H:%M:%S',
- autorange=True,
- title='Time'),
- yaxis=dict(tickformat=',d',
- title='Tasks'),
- hovermode='closest',
- title='Tasks per app'))
-
- return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
+def task_per_app_plot(task, status):
+
+ try:
+ task['epoch_time_running'] = (pd.to_datetime(
+ task['task_time_running']) - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
+ task['epoch_time_returned'] = (pd.to_datetime(
+ task['task_time_returned']) - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
+ start = task['epoch_time_running'].min()
+ end = task['epoch_time_returned'].max()
+ tasks_per_app = {}
+ all_tasks = [0] * (end - start + 1)
+ for i, row in task.iterrows():
+ if row['task_func_name'] not in tasks_per_app:
+ tasks_per_app[row['task_func_name']] = [0] * (end - start + 1)
+ for j in range(int(row['epoch_time_running']) + 1, int(row['epoch_time_returned']) + 1):
+ tasks_per_app[row['task_func_name']][j - start] += 1
+ all_tasks[j - start] += 1
+ fig = go.Figure(
+ data=[go.Scatter(x=list(range(0, end - start + 1)),
+ y=tasks_per_app[app],
+ name=app,
+ ) for app in tasks_per_app] +
+ [go.Scatter(x=list(range(0, end - start + 1)),
+ y=all_tasks,
+ name='All',
+ )],
+ layout=go.Layout(xaxis=dict(autorange=True,
+ title='Time (seconds)'),
+ yaxis=dict(title='Number of tasks'),
+ title="Tasks per app"))
+ return plot(fig, show_link=False, output_type="div", include_plotlyjs=False)
+ except Exception as e:
+ return "The tasks per app plot cannot be generated because of exception {}.".format(e)
def total_tasks_plot(df_task, df_status, columns=20):
|
{"golden_diff": "diff --git a/parsl/monitoring/visualization/plots/default/workflow_plots.py b/parsl/monitoring/visualization/plots/default/workflow_plots.py\n--- a/parsl/monitoring/visualization/plots/default/workflow_plots.py\n+++ b/parsl/monitoring/visualization/plots/default/workflow_plots.py\n@@ -1,4 +1,5 @@\n import numpy as np\n+import pandas as pd\n import plotly.graph_objs as go\n import plotly.figure_factory as ff\n from plotly.offline import plot\n@@ -49,47 +50,39 @@\n return plot(fig, show_link=False, output_type=\"div\", include_plotlyjs=False)\n \n \n-def task_per_app_plot(df_task, df_status):\n-\n- def y_axis_setup(array):\n- count = 0\n- items = []\n- for n in array:\n- if n:\n- count += 1\n- elif count > 0:\n- count -= 1\n- items.append(count)\n- return items\n-\n- # Fill up dict \"apps\" like: {app1: [#task1, #task2], app2: [#task4], app3: [#task3]}\n- apps_dict = dict()\n- for i in range(len(df_task)):\n- row = df_task.iloc[i]\n- if row['task_func_name'] in apps_dict:\n- apps_dict[row['task_func_name']].append(row['task_id'])\n- else:\n- apps_dict[row['task_func_name']] = [row['task_id']]\n-\n- fig = go.Figure(\n- data=[go.Scatter(x=df_status[df_status['task_id'].isin(tasks)]['timestamp'],\n- y=y_axis_setup(df_status[df_status['task_id'].isin(\n- tasks)]['task_status_name'] == 'running'),\n- name=app)\n- for app, tasks in apps_dict.items()] +\n- [go.Scatter(x=df_status['timestamp'],\n- y=y_axis_setup(\n- df_status['task_status_name'] == 'running'),\n- name='all')],\n- layout=go.Layout(xaxis=dict(tickformat='%m-%d\\n%H:%M:%S',\n- autorange=True,\n- title='Time'),\n- yaxis=dict(tickformat=',d',\n- title='Tasks'),\n- hovermode='closest',\n- title='Tasks per app'))\n-\n- return plot(fig, show_link=False, output_type=\"div\", include_plotlyjs=False)\n+def task_per_app_plot(task, status):\n+\n+ try:\n+ task['epoch_time_running'] = (pd.to_datetime(\n+ task['task_time_running']) - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\n+ task['epoch_time_returned'] = (pd.to_datetime(\n+ task['task_time_returned']) - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\n+ start = task['epoch_time_running'].min()\n+ end = task['epoch_time_returned'].max()\n+ tasks_per_app = {}\n+ all_tasks = [0] * (end - start + 1)\n+ for i, row in task.iterrows():\n+ if row['task_func_name'] not in tasks_per_app:\n+ tasks_per_app[row['task_func_name']] = [0] * (end - start + 1)\n+ for j in range(int(row['epoch_time_running']) + 1, int(row['epoch_time_returned']) + 1):\n+ tasks_per_app[row['task_func_name']][j - start] += 1\n+ all_tasks[j - start] += 1\n+ fig = go.Figure(\n+ data=[go.Scatter(x=list(range(0, end - start + 1)),\n+ y=tasks_per_app[app],\n+ name=app,\n+ ) for app in tasks_per_app] +\n+ [go.Scatter(x=list(range(0, end - start + 1)),\n+ y=all_tasks,\n+ name='All',\n+ )],\n+ layout=go.Layout(xaxis=dict(autorange=True,\n+ title='Time (seconds)'),\n+ yaxis=dict(title='Number of tasks'),\n+ title=\"Tasks per app\"))\n+ return plot(fig, show_link=False, output_type=\"div\", include_plotlyjs=False)\n+ except Exception as e:\n+ return \"The tasks per app plot cannot be generated because of exception {}.\".format(e)\n \n \n def total_tasks_plot(df_task, df_status, columns=20):\n", "issue": "Tasks per app graph appears as a sawtooth, not as rectangles\nSee attached plot.\r\n\r\nThis looks like it plots the number of data points at the point a task starts, and then the next point after a task ends, with linear interpolation between the two points. 
This is an incorrect visualisation: a task does not fade from existing to not existing over the entire duration of execution; instead it exists at full strength for the full duration of existence, and should be represented on the graph as a rectangular, not saw tooth, plot.\r\n\r\n\r\n\n", "before_files": [{"content": "import numpy as np\nimport plotly.graph_objs as go\nimport plotly.figure_factory as ff\nfrom plotly.offline import plot\nimport networkx as nx\nimport datetime\n\nfrom parsl.monitoring.visualization.utils import timestamp_to_int, num_to_timestamp, DB_DATE_FORMAT\n\n\ndef task_gantt_plot(df_task, time_completed=None):\n\n df_task = df_task.sort_values(by=['task_time_submitted'], ascending=False)\n\n # df_task['task_time_submitted'] = pd.to_datetime(df_task['task_time_submitted'], unit='s')\n # df_task['task_time_returned'] = pd.to_datetime(df_task['task_time_returned'], unit='s')\n\n # df_task = df_task.rename(index=str, columns={\"task_id\": \"Task\",\n # \"task_time_submitted\": \"Start\",\n # \"task_time_returned\": \"Finish\",\n # })\n # parsl_tasks = df_task.to_dict('records')\n parsl_tasks = []\n for i, task in df_task.iterrows():\n time_running, time_returned = task['task_time_running'], task['task_time_returned']\n if task['task_time_returned'] is None:\n time_returned = datetime.datetime.now()\n if time_completed is not None:\n time_returned = time_completed\n if task['task_time_running'] is None:\n time_running = task['task_time_submitted']\n description = \"Task ID: {}, app: {}\".format(task['task_id'], task['task_func_name'])\n dic1 = dict(Task=description, Start=task['task_time_submitted'],\n Finish=time_running, Resource=\"Pending\")\n dic2 = dict(Task=description, Start=time_running,\n Finish=time_returned, Resource=\"Running\")\n parsl_tasks.extend([dic1, dic2])\n colors = {'Pending': 'rgb(168, 168, 168)', 'Running': 'rgb(0, 0, 255)'}\n fig = ff.create_gantt(parsl_tasks,\n title=\"\",\n colors=colors,\n group_tasks=True,\n show_colorbar=True,\n index_col='Resource',\n )\n fig['layout']['yaxis']['title'] = 'Task'\n fig['layout']['yaxis']['showticklabels'] = False\n fig['layout']['xaxis']['title'] = 'Time'\n return plot(fig, show_link=False, output_type=\"div\", include_plotlyjs=False)\n\n\ndef task_per_app_plot(df_task, df_status):\n\n def y_axis_setup(array):\n count = 0\n items = []\n for n in array:\n if n:\n count += 1\n elif count > 0:\n count -= 1\n items.append(count)\n return items\n\n # Fill up dict \"apps\" like: {app1: [#task1, #task2], app2: [#task4], app3: [#task3]}\n apps_dict = dict()\n for i in range(len(df_task)):\n row = df_task.iloc[i]\n if row['task_func_name'] in apps_dict:\n apps_dict[row['task_func_name']].append(row['task_id'])\n else:\n apps_dict[row['task_func_name']] = [row['task_id']]\n\n fig = go.Figure(\n data=[go.Scatter(x=df_status[df_status['task_id'].isin(tasks)]['timestamp'],\n y=y_axis_setup(df_status[df_status['task_id'].isin(\n tasks)]['task_status_name'] == 'running'),\n name=app)\n for app, tasks in apps_dict.items()] +\n [go.Scatter(x=df_status['timestamp'],\n y=y_axis_setup(\n df_status['task_status_name'] == 'running'),\n name='all')],\n layout=go.Layout(xaxis=dict(tickformat='%m-%d\\n%H:%M:%S',\n autorange=True,\n title='Time'),\n yaxis=dict(tickformat=',d',\n title='Tasks'),\n hovermode='closest',\n title='Tasks per app'))\n\n return plot(fig, show_link=False, output_type=\"div\", include_plotlyjs=False)\n\n\ndef total_tasks_plot(df_task, df_status, columns=20):\n\n min_time = 
timestamp_to_int(min(df_status['timestamp']))\n max_time = timestamp_to_int(max(df_status['timestamp']))\n time_step = (max_time - min_time) / columns\n\n x_axis = []\n for i in np.arange(min_time, max_time + time_step, time_step):\n x_axis.append(num_to_timestamp(i).strftime(DB_DATE_FORMAT))\n\n # Fill up dict \"apps\" like: {app1: [#task1, #task2], app2: [#task4], app3: [#task3]}\n apps_dict = dict()\n for i in range(len(df_task)):\n row = df_task.iloc[i]\n if row['task_func_name'] in apps_dict:\n apps_dict[row['task_func_name']].append(row['task_id'])\n else:\n apps_dict[row['task_func_name']] = [row['task_id']]\n\n def y_axis_setup(value):\n items = []\n for app, tasks in apps_dict.items():\n tmp = []\n task = df_status[df_status['task_id'].isin(tasks)]\n for i in range(len(x_axis) - 1):\n x = task['timestamp'] >= x_axis[i]\n y = task['timestamp'] < x_axis[i + 1]\n tmp.append(sum(task.loc[x & y]['task_status_name'] == value))\n items = np.sum([items, tmp], axis=0)\n\n return items\n\n y_axis_done = y_axis_setup('done')\n y_axis_failed = y_axis_setup('failed')\n\n fig = go.Figure(data=[go.Bar(x=x_axis[:-1],\n y=y_axis_done,\n name='done'),\n go.Bar(x=x_axis[:-1],\n y=y_axis_failed,\n name='failed')],\n layout=go.Layout(xaxis=dict(tickformat='%m-%d\\n%H:%M:%S',\n autorange=True,\n title='Time'),\n yaxis=dict(tickformat=',d',\n title='Running tasks.' ' Bin width: ' + num_to_timestamp(time_step).strftime('%Mm%Ss')),\n annotations=[\n dict(\n x=0,\n y=1.07,\n showarrow=False,\n text='Total Done: ' +\n str(sum(y_axis_done)),\n xref='paper',\n yref='paper'\n ),\n dict(\n x=0,\n y=1.05,\n showarrow=False,\n text='Total Failed: ' +\n str(sum(y_axis_failed)),\n xref='paper',\n yref='paper'\n ),\n ],\n barmode='stack',\n title=\"Total tasks\"))\n\n return plot(fig, show_link=False, output_type=\"div\", include_plotlyjs=False)\n\n\ndef workflow_dag_plot(df_tasks, group_by_apps=True):\n G = nx.DiGraph(directed=True)\n nodes = df_tasks['task_id'].unique()\n dic = df_tasks.set_index('task_id').to_dict()\n G.add_nodes_from(nodes)\n\n # Add edges or links between the nodes:\n edges = []\n for k, v in dic['task_depends'].items():\n if v:\n adj = v.split(\",\")\n for e in adj:\n edges.append((int(e), k))\n G.add_edges_from(edges)\n\n node_positions = nx.nx_pydot.pydot_layout(G, prog='dot')\n node_traces = []\n\n if group_by_apps:\n groups_list = {app: i for i, app in enumerate(\n df_tasks['task_func_name'].unique())}\n else:\n groups_list = {'Pending': (0, 'gray'), \"Running\": (1, 'blue'), 'Completed': (2, 'green')}\n\n for k, _ in groups_list.items():\n node_trace = go.Scatter(\n x=[],\n y=[],\n text=[],\n mode='markers',\n textposition='top center',\n textfont=dict(\n family='arial',\n size=18,\n color='rgb(0,0,0)'\n ),\n hoverinfo='text',\n name=k, # legend app_name here\n marker=dict(\n showscale=False,\n # color='rgb(200,0,0)',\n size=11,\n line=dict(width=1, color='rgb(0,0,0)')))\n node_traces.append(node_trace)\n\n for node in node_positions:\n x, y = node_positions[node]\n if group_by_apps:\n name = dic['task_func_name'][node]\n index = groups_list[name]\n else:\n if dic['task_time_returned'][node] is not None:\n name = 'Completed'\n elif dic['task_time_running'][node] is not None:\n name = \"Running\"\n elif dic['task_time_submitted'][node] is not None:\n name = \"Pending\"\n index, color = groups_list[name]\n node_traces[index]['marker']['color'] = color\n node_traces[index]['x'] += tuple([x])\n node_traces[index]['y'] += tuple([y])\n node_traces[index]['text'] += tuple(\n 
[\"{}:{}\".format(dic['task_func_name'][node], node)])\n\n # The edges will be drawn as lines:\n edge_trace = go.Scatter(\n x=[],\n y=[],\n line=dict(width=1, color='rgb(160,160,160)'),\n hoverinfo='none',\n # showlegend=False,\n name='Dependency',\n mode='lines')\n\n for edge in G.edges:\n x0, y0 = node_positions[edge[0]]\n x1, y1 = node_positions[edge[1]]\n edge_trace['x'] += tuple([x0, x1, None])\n edge_trace['y'] += tuple([y0, y1, None])\n\n # Create figure:\n fig = go.Figure(data=[edge_trace] + node_traces,\n layout=go.Layout(\n title='Workflow DAG',\n titlefont=dict(size=16),\n showlegend=True,\n hovermode='closest',\n margin=dict(b=20, l=5, r=5, t=40), # noqa: E741\n xaxis=dict(showgrid=False, zeroline=False,\n showticklabels=False),\n yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)))\n return plot(fig, show_link=False, output_type=\"div\", include_plotlyjs=False)\n", "path": "parsl/monitoring/visualization/plots/default/workflow_plots.py"}], "after_files": [{"content": "import numpy as np\nimport pandas as pd\nimport plotly.graph_objs as go\nimport plotly.figure_factory as ff\nfrom plotly.offline import plot\nimport networkx as nx\nimport datetime\n\nfrom parsl.monitoring.visualization.utils import timestamp_to_int, num_to_timestamp, DB_DATE_FORMAT\n\n\ndef task_gantt_plot(df_task, time_completed=None):\n\n df_task = df_task.sort_values(by=['task_time_submitted'], ascending=False)\n\n # df_task['task_time_submitted'] = pd.to_datetime(df_task['task_time_submitted'], unit='s')\n # df_task['task_time_returned'] = pd.to_datetime(df_task['task_time_returned'], unit='s')\n\n # df_task = df_task.rename(index=str, columns={\"task_id\": \"Task\",\n # \"task_time_submitted\": \"Start\",\n # \"task_time_returned\": \"Finish\",\n # })\n # parsl_tasks = df_task.to_dict('records')\n parsl_tasks = []\n for i, task in df_task.iterrows():\n time_running, time_returned = task['task_time_running'], task['task_time_returned']\n if task['task_time_returned'] is None:\n time_returned = datetime.datetime.now()\n if time_completed is not None:\n time_returned = time_completed\n if task['task_time_running'] is None:\n time_running = task['task_time_submitted']\n description = \"Task ID: {}, app: {}\".format(task['task_id'], task['task_func_name'])\n dic1 = dict(Task=description, Start=task['task_time_submitted'],\n Finish=time_running, Resource=\"Pending\")\n dic2 = dict(Task=description, Start=time_running,\n Finish=time_returned, Resource=\"Running\")\n parsl_tasks.extend([dic1, dic2])\n colors = {'Pending': 'rgb(168, 168, 168)', 'Running': 'rgb(0, 0, 255)'}\n fig = ff.create_gantt(parsl_tasks,\n title=\"\",\n colors=colors,\n group_tasks=True,\n show_colorbar=True,\n index_col='Resource',\n )\n fig['layout']['yaxis']['title'] = 'Task'\n fig['layout']['yaxis']['showticklabels'] = False\n fig['layout']['xaxis']['title'] = 'Time'\n return plot(fig, show_link=False, output_type=\"div\", include_plotlyjs=False)\n\n\ndef task_per_app_plot(task, status):\n\n try:\n task['epoch_time_running'] = (pd.to_datetime(\n task['task_time_running']) - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\n task['epoch_time_returned'] = (pd.to_datetime(\n task['task_time_returned']) - pd.Timestamp(\"1970-01-01\")) // pd.Timedelta('1s')\n start = task['epoch_time_running'].min()\n end = task['epoch_time_returned'].max()\n tasks_per_app = {}\n all_tasks = [0] * (end - start + 1)\n for i, row in task.iterrows():\n if row['task_func_name'] not in tasks_per_app:\n tasks_per_app[row['task_func_name']] = [0] * 
(end - start + 1)\n for j in range(int(row['epoch_time_running']) + 1, int(row['epoch_time_returned']) + 1):\n tasks_per_app[row['task_func_name']][j - start] += 1\n all_tasks[j - start] += 1\n fig = go.Figure(\n data=[go.Scatter(x=list(range(0, end - start + 1)),\n y=tasks_per_app[app],\n name=app,\n ) for app in tasks_per_app] +\n [go.Scatter(x=list(range(0, end - start + 1)),\n y=all_tasks,\n name='All',\n )],\n layout=go.Layout(xaxis=dict(autorange=True,\n title='Time (seconds)'),\n yaxis=dict(title='Number of tasks'),\n title=\"Tasks per app\"))\n return plot(fig, show_link=False, output_type=\"div\", include_plotlyjs=False)\n except Exception as e:\n return \"The tasks per app plot cannot be generated because of exception {}.\".format(e)\n\n\ndef total_tasks_plot(df_task, df_status, columns=20):\n\n min_time = timestamp_to_int(min(df_status['timestamp']))\n max_time = timestamp_to_int(max(df_status['timestamp']))\n time_step = (max_time - min_time) / columns\n\n x_axis = []\n for i in np.arange(min_time, max_time + time_step, time_step):\n x_axis.append(num_to_timestamp(i).strftime(DB_DATE_FORMAT))\n\n # Fill up dict \"apps\" like: {app1: [#task1, #task2], app2: [#task4], app3: [#task3]}\n apps_dict = dict()\n for i in range(len(df_task)):\n row = df_task.iloc[i]\n if row['task_func_name'] in apps_dict:\n apps_dict[row['task_func_name']].append(row['task_id'])\n else:\n apps_dict[row['task_func_name']] = [row['task_id']]\n\n def y_axis_setup(value):\n items = []\n for app, tasks in apps_dict.items():\n tmp = []\n task = df_status[df_status['task_id'].isin(tasks)]\n for i in range(len(x_axis) - 1):\n x = task['timestamp'] >= x_axis[i]\n y = task['timestamp'] < x_axis[i + 1]\n tmp.append(sum(task.loc[x & y]['task_status_name'] == value))\n items = np.sum([items, tmp], axis=0)\n\n return items\n\n y_axis_done = y_axis_setup('done')\n y_axis_failed = y_axis_setup('failed')\n\n fig = go.Figure(data=[go.Bar(x=x_axis[:-1],\n y=y_axis_done,\n name='done'),\n go.Bar(x=x_axis[:-1],\n y=y_axis_failed,\n name='failed')],\n layout=go.Layout(xaxis=dict(tickformat='%m-%d\\n%H:%M:%S',\n autorange=True,\n title='Time'),\n yaxis=dict(tickformat=',d',\n title='Running tasks.' 
' Bin width: ' + num_to_timestamp(time_step).strftime('%Mm%Ss')),\n annotations=[\n dict(\n x=0,\n y=1.07,\n showarrow=False,\n text='Total Done: ' +\n str(sum(y_axis_done)),\n xref='paper',\n yref='paper'\n ),\n dict(\n x=0,\n y=1.05,\n showarrow=False,\n text='Total Failed: ' +\n str(sum(y_axis_failed)),\n xref='paper',\n yref='paper'\n ),\n ],\n barmode='stack',\n title=\"Total tasks\"))\n\n return plot(fig, show_link=False, output_type=\"div\", include_plotlyjs=False)\n\n\ndef workflow_dag_plot(df_tasks, group_by_apps=True):\n G = nx.DiGraph(directed=True)\n nodes = df_tasks['task_id'].unique()\n dic = df_tasks.set_index('task_id').to_dict()\n G.add_nodes_from(nodes)\n\n # Add edges or links between the nodes:\n edges = []\n for k, v in dic['task_depends'].items():\n if v:\n adj = v.split(\",\")\n for e in adj:\n edges.append((int(e), k))\n G.add_edges_from(edges)\n\n node_positions = nx.nx_pydot.pydot_layout(G, prog='dot')\n node_traces = []\n\n if group_by_apps:\n groups_list = {app: i for i, app in enumerate(\n df_tasks['task_func_name'].unique())}\n else:\n groups_list = {'Pending': (0, 'gray'), \"Running\": (1, 'blue'), 'Completed': (2, 'green')}\n\n for k, _ in groups_list.items():\n node_trace = go.Scatter(\n x=[],\n y=[],\n text=[],\n mode='markers',\n textposition='top center',\n textfont=dict(\n family='arial',\n size=18,\n color='rgb(0,0,0)'\n ),\n hoverinfo='text',\n name=k, # legend app_name here\n marker=dict(\n showscale=False,\n # color='rgb(200,0,0)',\n size=11,\n line=dict(width=1, color='rgb(0,0,0)')))\n node_traces.append(node_trace)\n\n for node in node_positions:\n x, y = node_positions[node]\n if group_by_apps:\n name = dic['task_func_name'][node]\n index = groups_list[name]\n else:\n if dic['task_time_returned'][node] is not None:\n name = 'Completed'\n elif dic['task_time_running'][node] is not None:\n name = \"Running\"\n elif dic['task_time_submitted'][node] is not None:\n name = \"Pending\"\n index, color = groups_list[name]\n node_traces[index]['marker']['color'] = color\n node_traces[index]['x'] += tuple([x])\n node_traces[index]['y'] += tuple([y])\n node_traces[index]['text'] += tuple(\n [\"{}:{}\".format(dic['task_func_name'][node], node)])\n\n # The edges will be drawn as lines:\n edge_trace = go.Scatter(\n x=[],\n y=[],\n line=dict(width=1, color='rgb(160,160,160)'),\n hoverinfo='none',\n # showlegend=False,\n name='Dependency',\n mode='lines')\n\n for edge in G.edges:\n x0, y0 = node_positions[edge[0]]\n x1, y1 = node_positions[edge[1]]\n edge_trace['x'] += tuple([x0, x1, None])\n edge_trace['y'] += tuple([y0, y1, None])\n\n # Create figure:\n fig = go.Figure(data=[edge_trace] + node_traces,\n layout=go.Layout(\n title='Workflow DAG',\n titlefont=dict(size=16),\n showlegend=True,\n hovermode='closest',\n margin=dict(b=20, l=5, r=5, t=40), # noqa: E741\n xaxis=dict(showgrid=False, zeroline=False,\n showticklabels=False),\n yaxis=dict(showgrid=False, zeroline=False, showticklabels=False)))\n return plot(fig, show_link=False, output_type=\"div\", include_plotlyjs=False)\n", "path": "parsl/monitoring/visualization/plots/default/workflow_plots.py"}]}
| 3,414 | 1,012 |
gh_patches_debug_1417 | rasdani/github-patches | git_diff | getmoto__moto-1400 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
mock_xray_client cannot be used as a context manager
PR #1255 added support for `aws_xray_sdk` which is great.
But there is a problem with it: `moto.mock_xray_client` is *only* a function decorator, and unlike all other `mock_*` methods it cannot be used as a context manager or directly with `start()`...`stop()`.
As a result, it is not possible to write a `py.test` fixture which would add support for mocking `xray_client`.
Also, `mock_xray_client` does not return the result of the function it decorates. Given it is meant to be used to decorate test functions it is most likely not a big issue, but I think it is still worth fixing.
I will prepare a PR for the return value issue soon.
Also I am thinking about refactoring `mock_xray_client` to base it on the existing infrastructure (`BaseBackend`, `base_decorator`) but am not yet enough familiar with `moto` internals to be sure which would be the best way to implement it.
Installed version: `moto-ext==1.1.25`
The problem seemingly persists in current `master` branch.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `moto/xray/mock_client.py`
Content:
```
1 from functools import wraps
2 import os
3 from moto.xray import xray_backends
4 import aws_xray_sdk.core
5 from aws_xray_sdk.core.context import Context as AWSContext
6 from aws_xray_sdk.core.emitters.udp_emitter import UDPEmitter
7
8
9 class MockEmitter(UDPEmitter):
10 """
11 Replaces the code that sends UDP to local X-Ray daemon
12 """
13 def __init__(self, daemon_address='127.0.0.1:2000'):
14 address = os.getenv('AWS_XRAY_DAEMON_ADDRESS_YEAH_NOT_TODAY_MATE', daemon_address)
15 self._ip, self._port = self._parse_address(address)
16
17 def _xray_backend(self, region):
18 return xray_backends[region]
19
20 def send_entity(self, entity):
21 # Hack to get region
22 # region = entity.subsegments[0].aws['region']
23 # xray = self._xray_backend(region)
24
25 # TODO store X-Ray data, pretty sure X-Ray needs refactor for this
26 pass
27
28 def _send_data(self, data):
29 raise RuntimeError('Should not be running this')
30
31
32 def mock_xray_client(f):
33 """
34 Mocks the X-Ray sdk by pwning its evil singleton with our methods
35
36 The X-Ray SDK has normally been imported and `patched()` called long before we start mocking.
37 This means the Context() will be very unhappy if an env var isnt present, so we set that, save
38 the old context, then supply our new context.
39 We also patch the Emitter by subclassing the UDPEmitter class replacing its methods and pushing
40 that itno the recorder instance.
41 """
42 @wraps(f)
43 def _wrapped(*args, **kwargs):
44 print("Starting X-Ray Patch")
45
46 old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING')
47 os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR'
48 old_xray_context = aws_xray_sdk.core.xray_recorder._context
49 old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter
50 aws_xray_sdk.core.xray_recorder._context = AWSContext()
51 aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()
52
53 try:
54 f(*args, **kwargs)
55 finally:
56
57 if old_xray_context_var is None:
58 del os.environ['AWS_XRAY_CONTEXT_MISSING']
59 else:
60 os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var
61
62 aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter
63 aws_xray_sdk.core.xray_recorder._context = old_xray_context
64
65 return _wrapped
66
67
68 class XRaySegment(object):
69 """
70 XRay is request oriented, when a request comes in, normally middleware like django (or automatically in lambda) will mark
71 the start of a segment, this stay open during the lifetime of the request. During that time subsegments may be generated
72 by calling other SDK aware services or using some boto functions. Once the request is finished, middleware will also stop
73 the segment, thus causing it to be emitted via UDP.
74
75 During testing we're going to have to control the start and end of a segment via context managers.
76 """
77 def __enter__(self):
78 aws_xray_sdk.core.xray_recorder.begin_segment(name='moto_mock', traceid=None, parent_id=None, sampling=1)
79
80 return self
81
82 def __exit__(self, exc_type, exc_val, exc_tb):
83 aws_xray_sdk.core.xray_recorder.end_segment()
84
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/moto/xray/mock_client.py b/moto/xray/mock_client.py
--- a/moto/xray/mock_client.py
+++ b/moto/xray/mock_client.py
@@ -51,7 +51,7 @@
aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()
try:
- f(*args, **kwargs)
+ return f(*args, **kwargs)
finally:
if old_xray_context_var is None:
|
{"golden_diff": "diff --git a/moto/xray/mock_client.py b/moto/xray/mock_client.py\n--- a/moto/xray/mock_client.py\n+++ b/moto/xray/mock_client.py\n@@ -51,7 +51,7 @@\n aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()\n \n try:\n- f(*args, **kwargs)\n+ return f(*args, **kwargs)\n finally:\n \n if old_xray_context_var is None:\n", "issue": "mock_xray_client cannot be used as a context manager\nPR #1255 added support for `aws_xray_sdk` which is great.\r\nBut there is a problem with it: `moto.mock_xray_client` is *only* a function decorator, and unlike all other `mock_*` methods it cannot be used as a context manager or directly with `start()`...`stop()`.\r\nAs a result, it is not possible to write a `py.test` fixture which would add support for mocking `xray_client`.\r\n\r\nAlso, `mock_xray_client` does not return the result of the function it decorates. Given it is meant to be used to decorate test functions it is most likely not a big issue, but I think it is still worth fixing.\r\n\r\nI will prepare a PR for the return value issue soon.\r\nAlso I am thinking about refactoring `mock_xray_client` to base it on the existing infrastructure (`BaseBackend`, `base_decorator`) but am not yet enough familiar with `moto` internals to be sure which would be the best way to implement it.\r\n\r\nInstalled version: `moto-ext==1.1.25`\r\nThe problem seemingly persists in current `master` branch.\n", "before_files": [{"content": "from functools import wraps\nimport os\nfrom moto.xray import xray_backends\nimport aws_xray_sdk.core\nfrom aws_xray_sdk.core.context import Context as AWSContext\nfrom aws_xray_sdk.core.emitters.udp_emitter import UDPEmitter\n\n\nclass MockEmitter(UDPEmitter):\n \"\"\"\n Replaces the code that sends UDP to local X-Ray daemon\n \"\"\"\n def __init__(self, daemon_address='127.0.0.1:2000'):\n address = os.getenv('AWS_XRAY_DAEMON_ADDRESS_YEAH_NOT_TODAY_MATE', daemon_address)\n self._ip, self._port = self._parse_address(address)\n\n def _xray_backend(self, region):\n return xray_backends[region]\n\n def send_entity(self, entity):\n # Hack to get region\n # region = entity.subsegments[0].aws['region']\n # xray = self._xray_backend(region)\n\n # TODO store X-Ray data, pretty sure X-Ray needs refactor for this\n pass\n\n def _send_data(self, data):\n raise RuntimeError('Should not be running this')\n\n\ndef mock_xray_client(f):\n \"\"\"\n Mocks the X-Ray sdk by pwning its evil singleton with our methods\n\n The X-Ray SDK has normally been imported and `patched()` called long before we start mocking.\n This means the Context() will be very unhappy if an env var isnt present, so we set that, save\n the old context, then supply our new context.\n We also patch the Emitter by subclassing the UDPEmitter class replacing its methods and pushing\n that itno the recorder instance.\n \"\"\"\n @wraps(f)\n def _wrapped(*args, **kwargs):\n print(\"Starting X-Ray Patch\")\n\n old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING')\n os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR'\n old_xray_context = aws_xray_sdk.core.xray_recorder._context\n old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter\n aws_xray_sdk.core.xray_recorder._context = AWSContext()\n aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()\n\n try:\n f(*args, **kwargs)\n finally:\n\n if old_xray_context_var is None:\n del os.environ['AWS_XRAY_CONTEXT_MISSING']\n else:\n os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var\n\n aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter\n 
aws_xray_sdk.core.xray_recorder._context = old_xray_context\n\n return _wrapped\n\n\nclass XRaySegment(object):\n \"\"\"\n XRay is request oriented, when a request comes in, normally middleware like django (or automatically in lambda) will mark\n the start of a segment, this stay open during the lifetime of the request. During that time subsegments may be generated\n by calling other SDK aware services or using some boto functions. Once the request is finished, middleware will also stop\n the segment, thus causing it to be emitted via UDP.\n\n During testing we're going to have to control the start and end of a segment via context managers.\n \"\"\"\n def __enter__(self):\n aws_xray_sdk.core.xray_recorder.begin_segment(name='moto_mock', traceid=None, parent_id=None, sampling=1)\n\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n aws_xray_sdk.core.xray_recorder.end_segment()\n", "path": "moto/xray/mock_client.py"}], "after_files": [{"content": "from functools import wraps\nimport os\nfrom moto.xray import xray_backends\nimport aws_xray_sdk.core\nfrom aws_xray_sdk.core.context import Context as AWSContext\nfrom aws_xray_sdk.core.emitters.udp_emitter import UDPEmitter\n\n\nclass MockEmitter(UDPEmitter):\n \"\"\"\n Replaces the code that sends UDP to local X-Ray daemon\n \"\"\"\n def __init__(self, daemon_address='127.0.0.1:2000'):\n address = os.getenv('AWS_XRAY_DAEMON_ADDRESS_YEAH_NOT_TODAY_MATE', daemon_address)\n self._ip, self._port = self._parse_address(address)\n\n def _xray_backend(self, region):\n return xray_backends[region]\n\n def send_entity(self, entity):\n # Hack to get region\n # region = entity.subsegments[0].aws['region']\n # xray = self._xray_backend(region)\n\n # TODO store X-Ray data, pretty sure X-Ray needs refactor for this\n pass\n\n def _send_data(self, data):\n raise RuntimeError('Should not be running this')\n\n\ndef mock_xray_client(f):\n \"\"\"\n Mocks the X-Ray sdk by pwning its evil singleton with our methods\n\n The X-Ray SDK has normally been imported and `patched()` called long before we start mocking.\n This means the Context() will be very unhappy if an env var isnt present, so we set that, save\n the old context, then supply our new context.\n We also patch the Emitter by subclassing the UDPEmitter class replacing its methods and pushing\n that itno the recorder instance.\n \"\"\"\n @wraps(f)\n def _wrapped(*args, **kwargs):\n print(\"Starting X-Ray Patch\")\n\n old_xray_context_var = os.environ.get('AWS_XRAY_CONTEXT_MISSING')\n os.environ['AWS_XRAY_CONTEXT_MISSING'] = 'LOG_ERROR'\n old_xray_context = aws_xray_sdk.core.xray_recorder._context\n old_xray_emitter = aws_xray_sdk.core.xray_recorder._emitter\n aws_xray_sdk.core.xray_recorder._context = AWSContext()\n aws_xray_sdk.core.xray_recorder._emitter = MockEmitter()\n\n try:\n return f(*args, **kwargs)\n finally:\n\n if old_xray_context_var is None:\n del os.environ['AWS_XRAY_CONTEXT_MISSING']\n else:\n os.environ['AWS_XRAY_CONTEXT_MISSING'] = old_xray_context_var\n\n aws_xray_sdk.core.xray_recorder._emitter = old_xray_emitter\n aws_xray_sdk.core.xray_recorder._context = old_xray_context\n\n return _wrapped\n\n\nclass XRaySegment(object):\n \"\"\"\n XRay is request oriented, when a request comes in, normally middleware like django (or automatically in lambda) will mark\n the start of a segment, this stay open during the lifetime of the request. During that time subsegments may be generated\n by calling other SDK aware services or using some boto functions. 
Once the request is finished, middleware will also stop\n the segment, thus causing it to be emitted via UDP.\n\n During testing we're going to have to control the start and end of a segment via context managers.\n \"\"\"\n def __enter__(self):\n aws_xray_sdk.core.xray_recorder.begin_segment(name='moto_mock', traceid=None, parent_id=None, sampling=1)\n\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n aws_xray_sdk.core.xray_recorder.end_segment()\n", "path": "moto/xray/mock_client.py"}]}
| 1,463 | 106 |
gh_patches_debug_36728 | rasdani/github-patches | git_diff | mkdocs__mkdocs-2427 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Re-building w/ symbolic links stopped working, regression after #2385
Since a444c43 in master using the local development server via `mkdocs serve` updating files that are symbolically linked is not triggering to rebuild (and therefore not reloading browser tabs).
On first glance this is due to the switch to watchdog for detecting file-system changes which needs more guidance to handle this file-type.
Preparing a PR with a patch.
Ref: a444c43474f91dea089922dd8fb188d1db3a4535
restore re-building with symbolic-links, closes #2425
previously (1.1.2 + master at 23e2051) building was triggered by changes
of file-content that was symbolically linked within docs_dir while
`mkdocs serve` was running.
since migrating from livereload>=2.6.1 to watchdog>=2.0.0 to detect
file-system changes (triggering the re-build) it stopped working.
this is because watchdog does not support symbolic links out of the box,
e.g. see [1].
change is to provide additional observe instructions on the realpath [2]
for the following cases:
1. docs_dir & config_file_path path deviation:
when the absolute path to either the `docs_dir` or the `config_file` is
different from its realpath, the realpath is added for observing (as
well).
2. symbolic links within docs_dir:
if a file within docs_dir is a symbolic link, the files real path
is added for observing. sub-directories (that are not symbolically
linked) are traversed up to a depth of nine levels (only if the
recursive flag is enabled, otherwise no traversal into sub-directories).
Ref: 23e205153f01d24d50fe9ba18e5186cdbc2c2dbe
[1]: https://github.com/gorakhargosh/watchdog/issues/365
[2]: <https://docs.python.org/3.8/library/os.path.html#os.path.realpath>
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mkdocs/livereload/__init__.py`
Content:
```
1 import functools
2 import io
3 import logging
4 import mimetypes
5 import os
6 import os.path
7 import re
8 import socketserver
9 import threading
10 import time
11 import warnings
12 import wsgiref.simple_server
13
14 import watchdog.events
15 import watchdog.observers
16
17
18 class _LoggerAdapter(logging.LoggerAdapter):
19 def process(self, msg, kwargs):
20 return time.strftime("[%H:%M:%S] ") + msg, kwargs
21
22
23 log = _LoggerAdapter(logging.getLogger(__name__), {})
24
25
26 class LiveReloadServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):
27 daemon_threads = True
28 poll_response_timeout = 60
29
30 def __init__(
31 self,
32 builder,
33 host,
34 port,
35 root,
36 mount_path="/",
37 build_delay=0.25,
38 shutdown_delay=0.25,
39 **kwargs,
40 ):
41 self.builder = builder
42 self.server_name = host
43 self.server_port = port
44 self.root = os.path.abspath(root)
45 self.mount_path = ("/" + mount_path.lstrip("/")).rstrip("/") + "/"
46 self.url = f"http://{self.server_name}:{self.server_port}{self.mount_path}"
47 self.build_delay = build_delay
48 self.shutdown_delay = shutdown_delay
49 # To allow custom error pages.
50 self.error_handler = lambda code: None
51
52 super().__init__((host, port), _Handler, **kwargs)
53 self.set_app(self.serve_request)
54
55 self._wanted_epoch = _timestamp() # The version of the site that started building.
56 self._visible_epoch = self._wanted_epoch # Latest fully built version of the site.
57 self._epoch_cond = threading.Condition() # Must be held when accessing _visible_epoch.
58
59 self._to_rebuild = {} # Used as an ordered set of functions to call.
60 self._rebuild_cond = threading.Condition() # Must be held when accessing _to_rebuild.
61
62 self._shutdown = False
63 self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))
64 self.observer = watchdog.observers.Observer(timeout=shutdown_delay)
65
66 def watch(self, path, func=None, recursive=True):
67 """Add the 'path' to watched paths, call the function and reload when any file changes under it."""
68 path = os.path.abspath(path)
69 if func in (None, self.builder):
70 func = self.builder
71 else:
72 warnings.warn(
73 "Plugins should not pass the 'func' parameter of watch(). "
74 "The ability to execute custom callbacks will be removed soon.",
75 DeprecationWarning,
76 stacklevel=2,
77 )
78
79 def callback(event):
80 if event.is_directory:
81 return
82 # Text editors always cause a "file close" event in addition to "modified" when saving
83 # a file. Some editors also have "swap" functionality that keeps writing into another
84 # file that's never closed. Prevent such write events from causing a rebuild.
85 if isinstance(event, watchdog.events.FileModifiedEvent):
86 # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:
87 if type(self.observer).__name__ == "InotifyObserver":
88 return
89 log.debug(str(event))
90 with self._rebuild_cond:
91 self._to_rebuild[func] = True
92 self._rebuild_cond.notify_all()
93
94 handler = watchdog.events.FileSystemEventHandler()
95 handler.on_any_event = callback
96 self.observer.schedule(handler, path, recursive=recursive)
97
98 def serve(self):
99 self.observer.start()
100
101 log.info(f"Serving on {self.url}")
102 self.serve_thread.start()
103
104 self._build_loop()
105
106 def _build_loop(self):
107 while True:
108 with self._rebuild_cond:
109 while not self._rebuild_cond.wait_for(
110 lambda: self._to_rebuild or self._shutdown, timeout=self.shutdown_delay
111 ):
112 # We could have used just one wait instead of a loop + timeout, but we need
113 # occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.
114 pass
115 if self._shutdown:
116 break
117 log.info("Detected file changes")
118 while self._rebuild_cond.wait(timeout=self.build_delay):
119 log.debug("Waiting for file changes to stop happening")
120
121 self._wanted_epoch = _timestamp()
122 funcs = list(self._to_rebuild)
123 self._to_rebuild.clear()
124
125 for func in funcs:
126 func()
127
128 with self._epoch_cond:
129 log.info("Reloading browsers")
130 self._visible_epoch = self._wanted_epoch
131 self._epoch_cond.notify_all()
132
133 def shutdown(self):
134 self.observer.stop()
135 with self._rebuild_cond:
136 self._shutdown = True
137 self._rebuild_cond.notify_all()
138
139 if self.serve_thread.is_alive():
140 super().shutdown()
141 self.serve_thread.join()
142 self.observer.join()
143
144 def serve_request(self, environ, start_response):
145 try:
146 result = self._serve_request(environ, start_response)
147 except Exception:
148 code = 500
149 msg = "500 Internal Server Error"
150 log.exception(msg)
151 else:
152 if result is not None:
153 return result
154 code = 404
155 msg = "404 Not Found"
156
157 error_content = None
158 try:
159 error_content = self.error_handler(code)
160 except Exception:
161 log.exception("Failed to render an error message!")
162 if error_content is None:
163 error_content = msg.encode()
164
165 start_response(msg, [("Content-Type", "text/html")])
166 return [error_content]
167
168 def _serve_request(self, environ, start_response):
169 path = environ["PATH_INFO"]
170
171 m = re.fullmatch(r"/livereload/([0-9]+)/[0-9]+", path)
172 if m:
173 epoch = int(m[1])
174 start_response("200 OK", [("Content-Type", "text/plain")])
175
176 def condition():
177 return self._visible_epoch > epoch
178
179 with self._epoch_cond:
180 if not condition():
181 # Stall the browser, respond as soon as there's something new.
182 # If there's not, respond anyway after a minute.
183 self._log_poll_request(environ.get("HTTP_REFERER"), request_id=path)
184 self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)
185 return [b"%d" % self._visible_epoch]
186
187 if path == "/js/livereload.js":
188 file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "livereload.js")
189 elif path.startswith(self.mount_path):
190 if path.endswith("/"):
191 path += "index.html"
192 path = path[len(self.mount_path):]
193 file_path = os.path.join(self.root, path.lstrip("/"))
194 elif path == "/":
195 start_response("302 Found", [("Location", self.mount_path)])
196 return []
197 else:
198 return None # Not found
199
200 # Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.
201 with self._epoch_cond:
202 self._epoch_cond.wait_for(lambda: self._visible_epoch == self._wanted_epoch)
203 epoch = self._visible_epoch
204
205 try:
206 file = open(file_path, "rb")
207 except OSError:
208 return None # Not found
209
210 if path.endswith(".html"):
211 with file:
212 content = file.read()
213 content = self._inject_js_into_html(content, epoch)
214 file = io.BytesIO(content)
215 content_length = len(content)
216 else:
217 content_length = os.path.getsize(file_path)
218
219 content_type = self._guess_type(file_path)
220 start_response(
221 "200 OK", [("Content-Type", content_type), ("Content-Length", str(content_length))]
222 )
223 return wsgiref.util.FileWrapper(file)
224
225 @classmethod
226 def _inject_js_into_html(cls, content, epoch):
227 try:
228 body_end = content.rindex(b"</body>")
229 except ValueError:
230 body_end = len(content)
231 # The page will reload if the livereload poller returns a newer epoch than what it knows.
232 # The other timestamp becomes just a unique identifier for the initiating page.
233 return (
234 b'%b<script src="/js/livereload.js"></script><script>livereload(%d, %d);</script>%b'
235 % (content[:body_end], epoch, _timestamp(), content[body_end:])
236 )
237
238 @classmethod
239 @functools.lru_cache() # "Cache" to not repeat the same message for the same browser tab.
240 def _log_poll_request(cls, url, request_id):
241 log.info(f"Browser connected: {url}")
242
243 def _guess_type(cls, path):
244 # MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).
245 # Other uncommon types will not be accepted.
246 if path.endswith((".js", ".JS")):
247 return "application/javascript"
248 if path.endswith(".gz"):
249 return "application/gzip"
250
251 guess, _ = mimetypes.guess_type(path)
252 if guess:
253 return guess
254 return "application/octet-stream"
255
256
257 class _Handler(wsgiref.simple_server.WSGIRequestHandler):
258 def log_request(self, code="-", size="-"):
259 level = logging.DEBUG if str(code) == "200" else logging.WARNING
260 log.log(level, f'"{self.requestline}" code {code}')
261
262 def log_message(self, format, *args):
263 log.debug(format, *args)
264
265
266 def _timestamp():
267 return round(time.monotonic() * 1000)
268
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mkdocs/livereload/__init__.py b/mkdocs/livereload/__init__.py
--- a/mkdocs/livereload/__init__.py
+++ b/mkdocs/livereload/__init__.py
@@ -4,6 +4,7 @@
 import mimetypes
 import os
 import os.path
+import pathlib
 import re
 import socketserver
 import threading
@@ -76,8 +77,10 @@
                 stacklevel=2,
             )

-        def callback(event):
-            if event.is_directory:
+        def callback(event, allowed_path=None):
+            if isinstance(event, watchdog.events.DirCreatedEvent):
+                return
+            if allowed_path is not None and event.src_path != allowed_path:
                 return
             # Text editors always cause a "file close" event in addition to "modified" when saving
             # a file. Some editors also have "swap" functionality that keeps writing into another
@@ -91,9 +94,43 @@
                 self._to_rebuild[func] = True
                 self._rebuild_cond.notify_all()

-        handler = watchdog.events.FileSystemEventHandler()
-        handler.on_any_event = callback
-        self.observer.schedule(handler, path, recursive=recursive)
+        dir_handler = watchdog.events.FileSystemEventHandler()
+        dir_handler.on_any_event = callback
+
+        seen = set()
+
+        def schedule(path):
+            seen.add(path)
+            if os.path.isfile(path):
+                # Watchdog doesn't support watching files, so watch its directory and filter by path
+                handler = watchdog.events.FileSystemEventHandler()
+                handler.on_any_event = lambda event: callback(event, allowed_path=path)
+
+                parent = os.path.dirname(path)
+                log.debug(f"Watching file '{path}' through directory '{parent}'")
+                self.observer.schedule(handler, parent)
+            else:
+                log.debug(f"Watching directory '{path}'")
+                self.observer.schedule(dir_handler, path, recursive=recursive)
+
+        schedule(os.path.realpath(path))
+
+        def watch_symlink_targets(path_obj):  # path is os.DirEntry or pathlib.Path
+            if path_obj.is_symlink():
+                # The extra `readlink` is needed due to https://bugs.python.org/issue9949
+                target = os.path.realpath(os.readlink(os.fspath(path_obj)))
+                if target in seen or not os.path.exists(target):
+                    return
+                schedule(target)
+
+                path_obj = pathlib.Path(target)
+
+            if path_obj.is_dir() and recursive:
+                with os.scandir(os.fspath(path_obj)) as scan:
+                    for entry in scan:
+                        watch_symlink_targets(entry)
+
+        watch_symlink_targets(pathlib.Path(path))

     def serve(self):
         self.observer.start()
|
{"golden_diff": "diff --git a/mkdocs/livereload/__init__.py b/mkdocs/livereload/__init__.py\n--- a/mkdocs/livereload/__init__.py\n+++ b/mkdocs/livereload/__init__.py\n@@ -4,6 +4,7 @@\n import mimetypes\n import os\n import os.path\n+import pathlib\n import re\n import socketserver\n import threading\n@@ -76,8 +77,10 @@\n stacklevel=2,\n )\n \n- def callback(event):\n- if event.is_directory:\n+ def callback(event, allowed_path=None):\n+ if isinstance(event, watchdog.events.DirCreatedEvent):\n+ return\n+ if allowed_path is not None and event.src_path != allowed_path:\n return\n # Text editors always cause a \"file close\" event in addition to \"modified\" when saving\n # a file. Some editors also have \"swap\" functionality that keeps writing into another\n@@ -91,9 +94,43 @@\n self._to_rebuild[func] = True\n self._rebuild_cond.notify_all()\n \n- handler = watchdog.events.FileSystemEventHandler()\n- handler.on_any_event = callback\n- self.observer.schedule(handler, path, recursive=recursive)\n+ dir_handler = watchdog.events.FileSystemEventHandler()\n+ dir_handler.on_any_event = callback\n+\n+ seen = set()\n+\n+ def schedule(path):\n+ seen.add(path)\n+ if os.path.isfile(path):\n+ # Watchdog doesn't support watching files, so watch its directory and filter by path\n+ handler = watchdog.events.FileSystemEventHandler()\n+ handler.on_any_event = lambda event: callback(event, allowed_path=path)\n+\n+ parent = os.path.dirname(path)\n+ log.debug(f\"Watching file '{path}' through directory '{parent}'\")\n+ self.observer.schedule(handler, parent)\n+ else:\n+ log.debug(f\"Watching directory '{path}'\")\n+ self.observer.schedule(dir_handler, path, recursive=recursive)\n+\n+ schedule(os.path.realpath(path))\n+\n+ def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path\n+ if path_obj.is_symlink():\n+ # The extra `readlink` is needed due to https://bugs.python.org/issue9949\n+ target = os.path.realpath(os.readlink(os.fspath(path_obj)))\n+ if target in seen or not os.path.exists(target):\n+ return\n+ schedule(target)\n+\n+ path_obj = pathlib.Path(target)\n+\n+ if path_obj.is_dir() and recursive:\n+ with os.scandir(os.fspath(path_obj)) as scan:\n+ for entry in scan:\n+ watch_symlink_targets(entry)\n+\n+ watch_symlink_targets(pathlib.Path(path))\n \n def serve(self):\n self.observer.start()\n", "issue": "Re-building w/ symbolic links stopped working, regression after #2385\nSince a444c43 in master using the local development server via `mkdocs serve` updating files that are symbolically linked is not triggering to rebuild (and therefore not reloading browser tabs).\r\n\r\nOn first glance this is due to the switch to watchdog for detecting file-system changes which needs more guidance to handle this file-type.\r\n\r\nPreparing a PR with a patch.\r\n\r\nRef: a444c43474f91dea089922dd8fb188d1db3a4535\nrestore re-building with symbolic-links, closes #2425\npreviously (1.1.2 + master at 23e2051) building was triggered by changes\r\nof file-content that was symbolically linked within docs_dir while\r\n`mkdocs serve` was running.\r\n\r\nsince migrating from livereload>=2.6.1 to watchdog>=2.0.0 to detect\r\nfile-system changes (triggering the re-build) it stopped working.\r\n\r\nthis is because watchdog does not support symbolic links out of the box,\r\ne.g. see [1].\r\n\r\nchange is to provide additional observe instructions on the realpath [2]\r\nfor the following cases:\r\n\r\n1. 
docs_dir & config_file_path path deviation:\r\n\r\n when the absolute path to either the `docs_dir` or the `config_file` is\r\n different from its realpath, the realpath is added for observing (as\r\n well).\r\n\r\n2. symbolic links within docs_dir:\r\n\r\n if a file within docs_dir is a symbolic link, the files real path\r\n is added for observing. sub-directories (that are not symbolically\r\n linked) are traversed up to a depth of nine levels (only if the\r\n recursive flag is enabled, otherwise no traversal into sub-directories).\r\n\r\nRef: 23e205153f01d24d50fe9ba18e5186cdbc2c2dbe\r\n[1]: https://github.com/gorakhargosh/watchdog/issues/365\r\n[2]: <https://docs.python.org/3.8/library/os.path.html#os.path.realpath>\n", "before_files": [{"content": "import functools\nimport io\nimport logging\nimport mimetypes\nimport os\nimport os.path\nimport re\nimport socketserver\nimport threading\nimport time\nimport warnings\nimport wsgiref.simple_server\n\nimport watchdog.events\nimport watchdog.observers\n\n\nclass _LoggerAdapter(logging.LoggerAdapter):\n def process(self, msg, kwargs):\n return time.strftime(\"[%H:%M:%S] \") + msg, kwargs\n\n\nlog = _LoggerAdapter(logging.getLogger(__name__), {})\n\n\nclass LiveReloadServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):\n daemon_threads = True\n poll_response_timeout = 60\n\n def __init__(\n self,\n builder,\n host,\n port,\n root,\n mount_path=\"/\",\n build_delay=0.25,\n shutdown_delay=0.25,\n **kwargs,\n ):\n self.builder = builder\n self.server_name = host\n self.server_port = port\n self.root = os.path.abspath(root)\n self.mount_path = (\"/\" + mount_path.lstrip(\"/\")).rstrip(\"/\") + \"/\"\n self.url = f\"http://{self.server_name}:{self.server_port}{self.mount_path}\"\n self.build_delay = build_delay\n self.shutdown_delay = shutdown_delay\n # To allow custom error pages.\n self.error_handler = lambda code: None\n\n super().__init__((host, port), _Handler, **kwargs)\n self.set_app(self.serve_request)\n\n self._wanted_epoch = _timestamp() # The version of the site that started building.\n self._visible_epoch = self._wanted_epoch # Latest fully built version of the site.\n self._epoch_cond = threading.Condition() # Must be held when accessing _visible_epoch.\n\n self._to_rebuild = {} # Used as an ordered set of functions to call.\n self._rebuild_cond = threading.Condition() # Must be held when accessing _to_rebuild.\n\n self._shutdown = False\n self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))\n self.observer = watchdog.observers.Observer(timeout=shutdown_delay)\n\n def watch(self, path, func=None, recursive=True):\n \"\"\"Add the 'path' to watched paths, call the function and reload when any file changes under it.\"\"\"\n path = os.path.abspath(path)\n if func in (None, self.builder):\n func = self.builder\n else:\n warnings.warn(\n \"Plugins should not pass the 'func' parameter of watch(). \"\n \"The ability to execute custom callbacks will be removed soon.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n def callback(event):\n if event.is_directory:\n return\n # Text editors always cause a \"file close\" event in addition to \"modified\" when saving\n # a file. Some editors also have \"swap\" functionality that keeps writing into another\n # file that's never closed. 
Prevent such write events from causing a rebuild.\n if isinstance(event, watchdog.events.FileModifiedEvent):\n # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:\n if type(self.observer).__name__ == \"InotifyObserver\":\n return\n log.debug(str(event))\n with self._rebuild_cond:\n self._to_rebuild[func] = True\n self._rebuild_cond.notify_all()\n\n handler = watchdog.events.FileSystemEventHandler()\n handler.on_any_event = callback\n self.observer.schedule(handler, path, recursive=recursive)\n\n def serve(self):\n self.observer.start()\n\n log.info(f\"Serving on {self.url}\")\n self.serve_thread.start()\n\n self._build_loop()\n\n def _build_loop(self):\n while True:\n with self._rebuild_cond:\n while not self._rebuild_cond.wait_for(\n lambda: self._to_rebuild or self._shutdown, timeout=self.shutdown_delay\n ):\n # We could have used just one wait instead of a loop + timeout, but we need\n # occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.\n pass\n if self._shutdown:\n break\n log.info(\"Detected file changes\")\n while self._rebuild_cond.wait(timeout=self.build_delay):\n log.debug(\"Waiting for file changes to stop happening\")\n\n self._wanted_epoch = _timestamp()\n funcs = list(self._to_rebuild)\n self._to_rebuild.clear()\n\n for func in funcs:\n func()\n\n with self._epoch_cond:\n log.info(\"Reloading browsers\")\n self._visible_epoch = self._wanted_epoch\n self._epoch_cond.notify_all()\n\n def shutdown(self):\n self.observer.stop()\n with self._rebuild_cond:\n self._shutdown = True\n self._rebuild_cond.notify_all()\n\n if self.serve_thread.is_alive():\n super().shutdown()\n self.serve_thread.join()\n self.observer.join()\n\n def serve_request(self, environ, start_response):\n try:\n result = self._serve_request(environ, start_response)\n except Exception:\n code = 500\n msg = \"500 Internal Server Error\"\n log.exception(msg)\n else:\n if result is not None:\n return result\n code = 404\n msg = \"404 Not Found\"\n\n error_content = None\n try:\n error_content = self.error_handler(code)\n except Exception:\n log.exception(\"Failed to render an error message!\")\n if error_content is None:\n error_content = msg.encode()\n\n start_response(msg, [(\"Content-Type\", \"text/html\")])\n return [error_content]\n\n def _serve_request(self, environ, start_response):\n path = environ[\"PATH_INFO\"]\n\n m = re.fullmatch(r\"/livereload/([0-9]+)/[0-9]+\", path)\n if m:\n epoch = int(m[1])\n start_response(\"200 OK\", [(\"Content-Type\", \"text/plain\")])\n\n def condition():\n return self._visible_epoch > epoch\n\n with self._epoch_cond:\n if not condition():\n # Stall the browser, respond as soon as there's something new.\n # If there's not, respond anyway after a minute.\n self._log_poll_request(environ.get(\"HTTP_REFERER\"), request_id=path)\n self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)\n return [b\"%d\" % self._visible_epoch]\n\n if path == \"/js/livereload.js\":\n file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"livereload.js\")\n elif path.startswith(self.mount_path):\n if path.endswith(\"/\"):\n path += \"index.html\"\n path = path[len(self.mount_path):]\n file_path = os.path.join(self.root, path.lstrip(\"/\"))\n elif path == \"/\":\n start_response(\"302 Found\", [(\"Location\", self.mount_path)])\n return []\n else:\n return None # Not found\n\n # Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.\n with self._epoch_cond:\n 
self._epoch_cond.wait_for(lambda: self._visible_epoch == self._wanted_epoch)\n epoch = self._visible_epoch\n\n try:\n file = open(file_path, \"rb\")\n except OSError:\n return None # Not found\n\n if path.endswith(\".html\"):\n with file:\n content = file.read()\n content = self._inject_js_into_html(content, epoch)\n file = io.BytesIO(content)\n content_length = len(content)\n else:\n content_length = os.path.getsize(file_path)\n\n content_type = self._guess_type(file_path)\n start_response(\n \"200 OK\", [(\"Content-Type\", content_type), (\"Content-Length\", str(content_length))]\n )\n return wsgiref.util.FileWrapper(file)\n\n @classmethod\n def _inject_js_into_html(cls, content, epoch):\n try:\n body_end = content.rindex(b\"</body>\")\n except ValueError:\n body_end = len(content)\n # The page will reload if the livereload poller returns a newer epoch than what it knows.\n # The other timestamp becomes just a unique identifier for the initiating page.\n return (\n b'%b<script src=\"/js/livereload.js\"></script><script>livereload(%d, %d);</script>%b'\n % (content[:body_end], epoch, _timestamp(), content[body_end:])\n )\n\n @classmethod\n @functools.lru_cache() # \"Cache\" to not repeat the same message for the same browser tab.\n def _log_poll_request(cls, url, request_id):\n log.info(f\"Browser connected: {url}\")\n\n def _guess_type(cls, path):\n # MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).\n # Other uncommon types will not be accepted.\n if path.endswith((\".js\", \".JS\")):\n return \"application/javascript\"\n if path.endswith(\".gz\"):\n return \"application/gzip\"\n\n guess, _ = mimetypes.guess_type(path)\n if guess:\n return guess\n return \"application/octet-stream\"\n\n\nclass _Handler(wsgiref.simple_server.WSGIRequestHandler):\n def log_request(self, code=\"-\", size=\"-\"):\n level = logging.DEBUG if str(code) == \"200\" else logging.WARNING\n log.log(level, f'\"{self.requestline}\" code {code}')\n\n def log_message(self, format, *args):\n log.debug(format, *args)\n\n\ndef _timestamp():\n return round(time.monotonic() * 1000)\n", "path": "mkdocs/livereload/__init__.py"}], "after_files": [{"content": "import functools\nimport io\nimport logging\nimport mimetypes\nimport os\nimport os.path\nimport pathlib\nimport re\nimport socketserver\nimport threading\nimport time\nimport warnings\nimport wsgiref.simple_server\n\nimport watchdog.events\nimport watchdog.observers\n\n\nclass _LoggerAdapter(logging.LoggerAdapter):\n def process(self, msg, kwargs):\n return time.strftime(\"[%H:%M:%S] \") + msg, kwargs\n\n\nlog = _LoggerAdapter(logging.getLogger(__name__), {})\n\n\nclass LiveReloadServer(socketserver.ThreadingMixIn, wsgiref.simple_server.WSGIServer):\n daemon_threads = True\n poll_response_timeout = 60\n\n def __init__(\n self,\n builder,\n host,\n port,\n root,\n mount_path=\"/\",\n build_delay=0.25,\n shutdown_delay=0.25,\n **kwargs,\n ):\n self.builder = builder\n self.server_name = host\n self.server_port = port\n self.root = os.path.abspath(root)\n self.mount_path = (\"/\" + mount_path.lstrip(\"/\")).rstrip(\"/\") + \"/\"\n self.url = f\"http://{self.server_name}:{self.server_port}{self.mount_path}\"\n self.build_delay = build_delay\n self.shutdown_delay = shutdown_delay\n # To allow custom error pages.\n self.error_handler = lambda code: None\n\n super().__init__((host, port), _Handler, **kwargs)\n self.set_app(self.serve_request)\n\n self._wanted_epoch = _timestamp() # The version of the site that started building.\n 
self._visible_epoch = self._wanted_epoch # Latest fully built version of the site.\n self._epoch_cond = threading.Condition() # Must be held when accessing _visible_epoch.\n\n self._to_rebuild = {} # Used as an ordered set of functions to call.\n self._rebuild_cond = threading.Condition() # Must be held when accessing _to_rebuild.\n\n self._shutdown = False\n self.serve_thread = threading.Thread(target=lambda: self.serve_forever(shutdown_delay))\n self.observer = watchdog.observers.Observer(timeout=shutdown_delay)\n\n def watch(self, path, func=None, recursive=True):\n \"\"\"Add the 'path' to watched paths, call the function and reload when any file changes under it.\"\"\"\n path = os.path.abspath(path)\n if func in (None, self.builder):\n func = self.builder\n else:\n warnings.warn(\n \"Plugins should not pass the 'func' parameter of watch(). \"\n \"The ability to execute custom callbacks will be removed soon.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n def callback(event, allowed_path=None):\n if isinstance(event, watchdog.events.DirCreatedEvent):\n return\n if allowed_path is not None and event.src_path != allowed_path:\n return\n # Text editors always cause a \"file close\" event in addition to \"modified\" when saving\n # a file. Some editors also have \"swap\" functionality that keeps writing into another\n # file that's never closed. Prevent such write events from causing a rebuild.\n if isinstance(event, watchdog.events.FileModifiedEvent):\n # But FileClosedEvent is implemented only on Linux, otherwise we mustn't skip this:\n if type(self.observer).__name__ == \"InotifyObserver\":\n return\n log.debug(str(event))\n with self._rebuild_cond:\n self._to_rebuild[func] = True\n self._rebuild_cond.notify_all()\n\n dir_handler = watchdog.events.FileSystemEventHandler()\n dir_handler.on_any_event = callback\n\n seen = set()\n\n def schedule(path):\n seen.add(path)\n if os.path.isfile(path):\n # Watchdog doesn't support watching files, so watch its directory and filter by path\n handler = watchdog.events.FileSystemEventHandler()\n handler.on_any_event = lambda event: callback(event, allowed_path=path)\n\n parent = os.path.dirname(path)\n log.debug(f\"Watching file '{path}' through directory '{parent}'\")\n self.observer.schedule(handler, parent)\n else:\n log.debug(f\"Watching directory '{path}'\")\n self.observer.schedule(dir_handler, path, recursive=recursive)\n\n schedule(os.path.realpath(path))\n\n def watch_symlink_targets(path_obj): # path is os.DirEntry or pathlib.Path\n if path_obj.is_symlink():\n # The extra `readlink` is needed due to https://bugs.python.org/issue9949\n target = os.path.realpath(os.readlink(os.fspath(path_obj)))\n if target in seen or not os.path.exists(target):\n return\n schedule(target)\n\n path_obj = pathlib.Path(target)\n\n if path_obj.is_dir() and recursive:\n with os.scandir(os.fspath(path_obj)) as scan:\n for entry in scan:\n watch_symlink_targets(entry)\n\n watch_symlink_targets(pathlib.Path(path))\n\n def serve(self):\n self.observer.start()\n\n log.info(f\"Serving on {self.url}\")\n self.serve_thread.start()\n\n self._build_loop()\n\n def _build_loop(self):\n while True:\n with self._rebuild_cond:\n while not self._rebuild_cond.wait_for(\n lambda: self._to_rebuild or self._shutdown, timeout=self.shutdown_delay\n ):\n # We could have used just one wait instead of a loop + timeout, but we need\n # occasional breaks, otherwise on Windows we can't receive KeyboardInterrupt.\n pass\n if self._shutdown:\n break\n log.info(\"Detected file changes\")\n 
while self._rebuild_cond.wait(timeout=self.build_delay):\n log.debug(\"Waiting for file changes to stop happening\")\n\n self._wanted_epoch = _timestamp()\n funcs = list(self._to_rebuild)\n self._to_rebuild.clear()\n\n for func in funcs:\n func()\n\n with self._epoch_cond:\n log.info(\"Reloading browsers\")\n self._visible_epoch = self._wanted_epoch\n self._epoch_cond.notify_all()\n\n def shutdown(self):\n self.observer.stop()\n with self._rebuild_cond:\n self._shutdown = True\n self._rebuild_cond.notify_all()\n\n if self.serve_thread.is_alive():\n super().shutdown()\n self.serve_thread.join()\n self.observer.join()\n\n def serve_request(self, environ, start_response):\n try:\n result = self._serve_request(environ, start_response)\n except Exception:\n code = 500\n msg = \"500 Internal Server Error\"\n log.exception(msg)\n else:\n if result is not None:\n return result\n code = 404\n msg = \"404 Not Found\"\n\n error_content = None\n try:\n error_content = self.error_handler(code)\n except Exception:\n log.exception(\"Failed to render an error message!\")\n if error_content is None:\n error_content = msg.encode()\n\n start_response(msg, [(\"Content-Type\", \"text/html\")])\n return [error_content]\n\n def _serve_request(self, environ, start_response):\n path = environ[\"PATH_INFO\"]\n\n m = re.fullmatch(r\"/livereload/([0-9]+)/[0-9]+\", path)\n if m:\n epoch = int(m[1])\n start_response(\"200 OK\", [(\"Content-Type\", \"text/plain\")])\n\n def condition():\n return self._visible_epoch > epoch\n\n with self._epoch_cond:\n if not condition():\n # Stall the browser, respond as soon as there's something new.\n # If there's not, respond anyway after a minute.\n self._log_poll_request(environ.get(\"HTTP_REFERER\"), request_id=path)\n self._epoch_cond.wait_for(condition, timeout=self.poll_response_timeout)\n return [b\"%d\" % self._visible_epoch]\n\n if path == \"/js/livereload.js\":\n file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"livereload.js\")\n elif path.startswith(self.mount_path):\n if path.endswith(\"/\"):\n path += \"index.html\"\n path = path[len(self.mount_path):]\n file_path = os.path.join(self.root, path.lstrip(\"/\"))\n elif path == \"/\":\n start_response(\"302 Found\", [(\"Location\", self.mount_path)])\n return []\n else:\n return None # Not found\n\n # Wait until the ongoing rebuild (if any) finishes, so we're not serving a half-built site.\n with self._epoch_cond:\n self._epoch_cond.wait_for(lambda: self._visible_epoch == self._wanted_epoch)\n epoch = self._visible_epoch\n\n try:\n file = open(file_path, \"rb\")\n except OSError:\n return None # Not found\n\n if path.endswith(\".html\"):\n with file:\n content = file.read()\n content = self._inject_js_into_html(content, epoch)\n file = io.BytesIO(content)\n content_length = len(content)\n else:\n content_length = os.path.getsize(file_path)\n\n content_type = self._guess_type(file_path)\n start_response(\n \"200 OK\", [(\"Content-Type\", content_type), (\"Content-Length\", str(content_length))]\n )\n return wsgiref.util.FileWrapper(file)\n\n @classmethod\n def _inject_js_into_html(cls, content, epoch):\n try:\n body_end = content.rindex(b\"</body>\")\n except ValueError:\n body_end = len(content)\n # The page will reload if the livereload poller returns a newer epoch than what it knows.\n # The other timestamp becomes just a unique identifier for the initiating page.\n return (\n b'%b<script src=\"/js/livereload.js\"></script><script>livereload(%d, %d);</script>%b'\n % (content[:body_end], epoch, 
_timestamp(), content[body_end:])\n )\n\n @classmethod\n @functools.lru_cache() # \"Cache\" to not repeat the same message for the same browser tab.\n def _log_poll_request(cls, url, request_id):\n log.info(f\"Browser connected: {url}\")\n\n def _guess_type(cls, path):\n # MkDocs only ensures a few common types (as seen in livereload_tests.py::test_mime_types).\n # Other uncommon types will not be accepted.\n if path.endswith((\".js\", \".JS\")):\n return \"application/javascript\"\n if path.endswith(\".gz\"):\n return \"application/gzip\"\n\n guess, _ = mimetypes.guess_type(path)\n if guess:\n return guess\n return \"application/octet-stream\"\n\n\nclass _Handler(wsgiref.simple_server.WSGIRequestHandler):\n def log_request(self, code=\"-\", size=\"-\"):\n level = logging.DEBUG if str(code) == \"200\" else logging.WARNING\n log.log(level, f'\"{self.requestline}\" code {code}')\n\n def log_message(self, format, *args):\n log.debug(format, *args)\n\n\ndef _timestamp():\n return round(time.monotonic() * 1000)\n", "path": "mkdocs/livereload/__init__.py"}]}
| 3,537 | 616 |
gh_patches_debug_3766
|
rasdani/github-patches
|
git_diff
|
python__typeshed-8843
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
scripts/create_baseline_stubs.py instructions are outdated
I would prefer:
```
1. Manually review the generated stubs in {stub_dir}
2. Run tests locally if you want (see CONTRIBUTING.md)
3. Commit the changes on a new branch and create a typeshed PR
```
The CI will check everything anyway, and is set up so that you don't have to run anything locally. This would also be consistent with CONTRIBUTING.md.
_Originally posted by @Akuli in https://github.com/python/typeshed/issues/8686#issuecomment-1237374669_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scripts/create_baseline_stubs.py`
Content:
```
1 #!/usr/bin/env python3
2
3 """Script to generate unannotated baseline stubs using stubgen.
4
5 Basic usage:
6 $ python3 scripts/create_baseline_stubs.py <project on PyPI>
7
8 Run with -h for more help.
9 """
10
11 from __future__ import annotations
12
13 import argparse
14 import os
15 import re
16 import subprocess
17 import sys
18
19 if sys.version_info >= (3, 8):
20 from importlib.metadata import distribution
21
22 PYRIGHT_CONFIG = "pyrightconfig.stricter.json"
23
24
25 def search_pip_freeze_output(project: str, output: str) -> tuple[str, str] | None:
26 # Look for lines such as "typed-ast==1.4.2". '-' matches '_' and
27 # '_' matches '-' in project name, so that "typed_ast" matches
28 # "typed-ast", and vice versa.
29 regex = "^(" + re.sub(r"[-_]", "[-_]", project) + ")==(.*)"
30 m = re.search(regex, output, flags=re.IGNORECASE | re.MULTILINE)
31 if not m:
32 return None
33 return m.group(1), m.group(2)
34
35
36 def get_installed_package_info(project: str) -> tuple[str, str] | None:
37 """Find package information from pip freeze output.
38
39 Match project name somewhat fuzzily (case sensitive; '-' matches '_', and
40 vice versa).
41
42 Return (normalized project name, installed version) if successful.
43 """
44 r = subprocess.run(["pip", "freeze"], capture_output=True, text=True, check=True)
45 return search_pip_freeze_output(project, r.stdout)
46
47
48 def run_stubgen(package: str, output: str) -> None:
49 print(f"Running stubgen: stubgen -o {output} -p {package}")
50 subprocess.run(["stubgen", "-o", output, "-p", package], check=True)
51
52
53 def run_black(stub_dir: str) -> None:
54 print(f"Running black: black {stub_dir}")
55 subprocess.run(["black", stub_dir])
56
57
58 def run_isort(stub_dir: str) -> None:
59 print(f"Running isort: isort {stub_dir}")
60 subprocess.run(["python3", "-m", "isort", stub_dir])
61
62
63 def create_metadata(stub_dir: str, version: str) -> None:
64 """Create a METADATA.toml file."""
65 match = re.match(r"[0-9]+.[0-9]+", version)
66 if match is None:
67 sys.exit(f"Error: Cannot parse version number: {version}")
68 filename = os.path.join(stub_dir, "METADATA.toml")
69 version = match.group(0)
70 if os.path.exists(filename):
71 return
72 print(f"Writing {filename}")
73 with open(filename, "w") as file:
74 file.write(
75 f"""\
76 version = "{version}.*"
77
78 [tool.stubtest]
79 ignore_missing_stub = false
80 """
81 )
82
83
84 def add_pyright_exclusion(stub_dir: str) -> None:
85 """Exclude stub_dir from strict pyright checks."""
86 with open(PYRIGHT_CONFIG) as f:
87 lines = f.readlines()
88 i = 0
89 while i < len(lines) and not lines[i].strip().startswith('"exclude": ['):
90 i += 1
91 assert i < len(lines), f"Error parsing {PYRIGHT_CONFIG}"
92 while not lines[i].strip().startswith("]"):
93 i += 1
94 # Must use forward slash in the .json file
95 line_to_add = f' "{stub_dir}",'.replace("\\", "/")
96 initial = i - 1
97 while lines[i].lower() > line_to_add.lower():
98 i -= 1
99 if lines[i + 1].strip().rstrip(",") == line_to_add.strip().rstrip(","):
100 print(f"{PYRIGHT_CONFIG} already up-to-date")
101 return
102 if i == initial:
103 # Special case: when adding to the end of the list, commas need tweaking
104 line_to_add = line_to_add.rstrip(",")
105 lines[i] = lines[i].rstrip() + ",\n"
106 lines.insert(i + 1, line_to_add + "\n")
107 print(f"Updating {PYRIGHT_CONFIG}")
108 with open(PYRIGHT_CONFIG, "w") as f:
109 f.writelines(lines)
110
111
112 def main() -> None:
113 parser = argparse.ArgumentParser(
114 description="""Generate baseline stubs automatically for an installed pip package
115 using stubgen. Also run black and isort. If the name of
116 the project is different from the runtime Python package name, you may
117 need to use --package (example: --package yaml PyYAML)."""
118 )
119 parser.add_argument("project", help="name of PyPI project for which to generate stubs under stubs/")
120 parser.add_argument("--package", help="generate stubs for this Python package (default is autodetected)")
121 args = parser.parse_args()
122 project = args.project
123 package = args.package
124
125 if not re.match(r"[a-zA-Z0-9-_.]+$", project):
126 sys.exit(f"Invalid character in project name: {project!r}")
127
128 if not package:
129 package = project # default
130 # Try to find which packages are provided by the project
131 # Use default if that fails or if several packages are found
132 #
133 # The importlib.metadata module is used for projects whose name is different
134 # from the runtime Python package name (example: PyYAML/yaml)
135 if sys.version_info >= (3, 8):
136 dist = distribution(project).read_text("top_level.txt")
137 if dist is not None:
138 packages = [name for name in dist.split() if not name.startswith("_")]
139 if len(packages) == 1:
140 package = packages[0]
141 print(f'Using detected package "{package}" for project "{project}"', file=sys.stderr)
142 print("Suggestion: Try again with --package argument if that's not what you wanted", file=sys.stderr)
143
144 if not os.path.isdir("stubs") or not os.path.isdir("stdlib"):
145 sys.exit("Error: Current working directory must be the root of typeshed repository")
146
147 # Get normalized project name and version of installed package.
148 info = get_installed_package_info(project)
149 if info is None:
150 print(f'Error: "{project}" is not installed', file=sys.stderr)
151 print("", file=sys.stderr)
152 print(f'Suggestion: Run "python3 -m pip install {project}" and try again', file=sys.stderr)
153 sys.exit(1)
154 project, version = info
155
156 stub_dir = os.path.join("stubs", project)
157 package_dir = os.path.join(stub_dir, package)
158 if os.path.exists(package_dir):
159 sys.exit(f"Error: {package_dir} already exists (delete it first)")
160
161 run_stubgen(package, stub_dir)
162
163 run_isort(stub_dir)
164 run_black(stub_dir)
165
166 create_metadata(stub_dir, version)
167
168 # Since the generated stubs won't have many type annotations, we
169 # have to exclude them from strict pyright checks.
170 add_pyright_exclusion(stub_dir)
171
172 print("\nDone!\n\nSuggested next steps:")
173 print(f" 1. Manually review the generated stubs in {stub_dir}")
174 print(" 2. Optionally run tests and autofixes (see tests/README.md for details")
175 print(" 3. Commit the changes on a new branch and create a typeshed PR (don't force-push!)")
176
177
178 if __name__ == "__main__":
179 main()
180
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scripts/create_baseline_stubs.py b/scripts/create_baseline_stubs.py
--- a/scripts/create_baseline_stubs.py
+++ b/scripts/create_baseline_stubs.py
@@ -47,7 +47,7 @@

 def run_stubgen(package: str, output: str) -> None:
     print(f"Running stubgen: stubgen -o {output} -p {package}")
-    subprocess.run(["stubgen", "-o", output, "-p", package], check=True)
+    subprocess.run(["stubgen", "-o", output, "-p", package, "--export-less"], check=True)


 def run_black(stub_dir: str) -> None:
|
{"golden_diff": "diff --git a/scripts/create_baseline_stubs.py b/scripts/create_baseline_stubs.py\n--- a/scripts/create_baseline_stubs.py\n+++ b/scripts/create_baseline_stubs.py\n@@ -47,7 +47,7 @@\n \n def run_stubgen(package: str, output: str) -> None:\n print(f\"Running stubgen: stubgen -o {output} -p {package}\")\n- subprocess.run([\"stubgen\", \"-o\", output, \"-p\", package], check=True)\n+ subprocess.run([\"stubgen\", \"-o\", output, \"-p\", package, \"--export-less\"], check=True)\n \n \n def run_black(stub_dir: str) -> None:\n", "issue": "scripts/create_baseline_stubs.py instructions are outdated\nI would prefer:\r\n ```\r\n 1. Manually review the generated stubs in {stub_dir}\r\n 2. Run tests locally if you want (see CONTRIBUTING.md)\r\n 3. Commit the changes on a new branch and create a typeshed PR\r\n ```\r\n\r\nThe CI will check everything anyway, and is set up so that you don't have to run anything locally. This would also be consistent with CONTRIBUTING.md.\r\n\r\n_Originally posted by @Akuli in https://github.com/python/typeshed/issues/8686#issuecomment-1237374669_\r\n \n", "before_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Script to generate unannotated baseline stubs using stubgen.\n\nBasic usage:\n$ python3 scripts/create_baseline_stubs.py <project on PyPI>\n\nRun with -h for more help.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport argparse\nimport os\nimport re\nimport subprocess\nimport sys\n\nif sys.version_info >= (3, 8):\n from importlib.metadata import distribution\n\nPYRIGHT_CONFIG = \"pyrightconfig.stricter.json\"\n\n\ndef search_pip_freeze_output(project: str, output: str) -> tuple[str, str] | None:\n # Look for lines such as \"typed-ast==1.4.2\". '-' matches '_' and\n # '_' matches '-' in project name, so that \"typed_ast\" matches\n # \"typed-ast\", and vice versa.\n regex = \"^(\" + re.sub(r\"[-_]\", \"[-_]\", project) + \")==(.*)\"\n m = re.search(regex, output, flags=re.IGNORECASE | re.MULTILINE)\n if not m:\n return None\n return m.group(1), m.group(2)\n\n\ndef get_installed_package_info(project: str) -> tuple[str, str] | None:\n \"\"\"Find package information from pip freeze output.\n\n Match project name somewhat fuzzily (case sensitive; '-' matches '_', and\n vice versa).\n\n Return (normalized project name, installed version) if successful.\n \"\"\"\n r = subprocess.run([\"pip\", \"freeze\"], capture_output=True, text=True, check=True)\n return search_pip_freeze_output(project, r.stdout)\n\n\ndef run_stubgen(package: str, output: str) -> None:\n print(f\"Running stubgen: stubgen -o {output} -p {package}\")\n subprocess.run([\"stubgen\", \"-o\", output, \"-p\", package], check=True)\n\n\ndef run_black(stub_dir: str) -> None:\n print(f\"Running black: black {stub_dir}\")\n subprocess.run([\"black\", stub_dir])\n\n\ndef run_isort(stub_dir: str) -> None:\n print(f\"Running isort: isort {stub_dir}\")\n subprocess.run([\"python3\", \"-m\", \"isort\", stub_dir])\n\n\ndef create_metadata(stub_dir: str, version: str) -> None:\n \"\"\"Create a METADATA.toml file.\"\"\"\n match = re.match(r\"[0-9]+.[0-9]+\", version)\n if match is None:\n sys.exit(f\"Error: Cannot parse version number: {version}\")\n filename = os.path.join(stub_dir, \"METADATA.toml\")\n version = match.group(0)\n if os.path.exists(filename):\n return\n print(f\"Writing {filename}\")\n with open(filename, \"w\") as file:\n file.write(\n f\"\"\"\\\nversion = \"{version}.*\"\n\n[tool.stubtest]\nignore_missing_stub = false\n\"\"\"\n )\n\n\ndef add_pyright_exclusion(stub_dir: str) 
-> None:\n \"\"\"Exclude stub_dir from strict pyright checks.\"\"\"\n with open(PYRIGHT_CONFIG) as f:\n lines = f.readlines()\n i = 0\n while i < len(lines) and not lines[i].strip().startswith('\"exclude\": ['):\n i += 1\n assert i < len(lines), f\"Error parsing {PYRIGHT_CONFIG}\"\n while not lines[i].strip().startswith(\"]\"):\n i += 1\n # Must use forward slash in the .json file\n line_to_add = f' \"{stub_dir}\",'.replace(\"\\\\\", \"/\")\n initial = i - 1\n while lines[i].lower() > line_to_add.lower():\n i -= 1\n if lines[i + 1].strip().rstrip(\",\") == line_to_add.strip().rstrip(\",\"):\n print(f\"{PYRIGHT_CONFIG} already up-to-date\")\n return\n if i == initial:\n # Special case: when adding to the end of the list, commas need tweaking\n line_to_add = line_to_add.rstrip(\",\")\n lines[i] = lines[i].rstrip() + \",\\n\"\n lines.insert(i + 1, line_to_add + \"\\n\")\n print(f\"Updating {PYRIGHT_CONFIG}\")\n with open(PYRIGHT_CONFIG, \"w\") as f:\n f.writelines(lines)\n\n\ndef main() -> None:\n parser = argparse.ArgumentParser(\n description=\"\"\"Generate baseline stubs automatically for an installed pip package\n using stubgen. Also run black and isort. If the name of\n the project is different from the runtime Python package name, you may\n need to use --package (example: --package yaml PyYAML).\"\"\"\n )\n parser.add_argument(\"project\", help=\"name of PyPI project for which to generate stubs under stubs/\")\n parser.add_argument(\"--package\", help=\"generate stubs for this Python package (default is autodetected)\")\n args = parser.parse_args()\n project = args.project\n package = args.package\n\n if not re.match(r\"[a-zA-Z0-9-_.]+$\", project):\n sys.exit(f\"Invalid character in project name: {project!r}\")\n\n if not package:\n package = project # default\n # Try to find which packages are provided by the project\n # Use default if that fails or if several packages are found\n #\n # The importlib.metadata module is used for projects whose name is different\n # from the runtime Python package name (example: PyYAML/yaml)\n if sys.version_info >= (3, 8):\n dist = distribution(project).read_text(\"top_level.txt\")\n if dist is not None:\n packages = [name for name in dist.split() if not name.startswith(\"_\")]\n if len(packages) == 1:\n package = packages[0]\n print(f'Using detected package \"{package}\" for project \"{project}\"', file=sys.stderr)\n print(\"Suggestion: Try again with --package argument if that's not what you wanted\", file=sys.stderr)\n\n if not os.path.isdir(\"stubs\") or not os.path.isdir(\"stdlib\"):\n sys.exit(\"Error: Current working directory must be the root of typeshed repository\")\n\n # Get normalized project name and version of installed package.\n info = get_installed_package_info(project)\n if info is None:\n print(f'Error: \"{project}\" is not installed', file=sys.stderr)\n print(\"\", file=sys.stderr)\n print(f'Suggestion: Run \"python3 -m pip install {project}\" and try again', file=sys.stderr)\n sys.exit(1)\n project, version = info\n\n stub_dir = os.path.join(\"stubs\", project)\n package_dir = os.path.join(stub_dir, package)\n if os.path.exists(package_dir):\n sys.exit(f\"Error: {package_dir} already exists (delete it first)\")\n\n run_stubgen(package, stub_dir)\n\n run_isort(stub_dir)\n run_black(stub_dir)\n\n create_metadata(stub_dir, version)\n\n # Since the generated stubs won't have many type annotations, we\n # have to exclude them from strict pyright checks.\n add_pyright_exclusion(stub_dir)\n\n print(\"\\nDone!\\n\\nSuggested next 
steps:\")\n print(f\" 1. Manually review the generated stubs in {stub_dir}\")\n print(\" 2. Optionally run tests and autofixes (see tests/README.md for details\")\n print(\" 3. Commit the changes on a new branch and create a typeshed PR (don't force-push!)\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/create_baseline_stubs.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n\n\"\"\"Script to generate unannotated baseline stubs using stubgen.\n\nBasic usage:\n$ python3 scripts/create_baseline_stubs.py <project on PyPI>\n\nRun with -h for more help.\n\"\"\"\n\nfrom __future__ import annotations\n\nimport argparse\nimport os\nimport re\nimport subprocess\nimport sys\n\nif sys.version_info >= (3, 8):\n from importlib.metadata import distribution\n\nPYRIGHT_CONFIG = \"pyrightconfig.stricter.json\"\n\n\ndef search_pip_freeze_output(project: str, output: str) -> tuple[str, str] | None:\n # Look for lines such as \"typed-ast==1.4.2\". '-' matches '_' and\n # '_' matches '-' in project name, so that \"typed_ast\" matches\n # \"typed-ast\", and vice versa.\n regex = \"^(\" + re.sub(r\"[-_]\", \"[-_]\", project) + \")==(.*)\"\n m = re.search(regex, output, flags=re.IGNORECASE | re.MULTILINE)\n if not m:\n return None\n return m.group(1), m.group(2)\n\n\ndef get_installed_package_info(project: str) -> tuple[str, str] | None:\n \"\"\"Find package information from pip freeze output.\n\n Match project name somewhat fuzzily (case sensitive; '-' matches '_', and\n vice versa).\n\n Return (normalized project name, installed version) if successful.\n \"\"\"\n r = subprocess.run([\"pip\", \"freeze\"], capture_output=True, text=True, check=True)\n return search_pip_freeze_output(project, r.stdout)\n\n\ndef run_stubgen(package: str, output: str) -> None:\n print(f\"Running stubgen: stubgen -o {output} -p {package}\")\n subprocess.run([\"stubgen\", \"-o\", output, \"-p\", package, \"--export-less\"], check=True)\n\n\ndef run_black(stub_dir: str) -> None:\n print(f\"Running black: black {stub_dir}\")\n subprocess.run([\"black\", stub_dir])\n\n\ndef run_isort(stub_dir: str) -> None:\n print(f\"Running isort: isort {stub_dir}\")\n subprocess.run([\"python3\", \"-m\", \"isort\", stub_dir])\n\n\ndef create_metadata(stub_dir: str, version: str) -> None:\n \"\"\"Create a METADATA.toml file.\"\"\"\n match = re.match(r\"[0-9]+.[0-9]+\", version)\n if match is None:\n sys.exit(f\"Error: Cannot parse version number: {version}\")\n filename = os.path.join(stub_dir, \"METADATA.toml\")\n version = match.group(0)\n if os.path.exists(filename):\n return\n print(f\"Writing {filename}\")\n with open(filename, \"w\") as file:\n file.write(\n f\"\"\"\\\nversion = \"{version}.*\"\n\n[tool.stubtest]\nignore_missing_stub = false\n\"\"\"\n )\n\n\ndef add_pyright_exclusion(stub_dir: str) -> None:\n \"\"\"Exclude stub_dir from strict pyright checks.\"\"\"\n with open(PYRIGHT_CONFIG) as f:\n lines = f.readlines()\n i = 0\n while i < len(lines) and not lines[i].strip().startswith('\"exclude\": ['):\n i += 1\n assert i < len(lines), f\"Error parsing {PYRIGHT_CONFIG}\"\n while not lines[i].strip().startswith(\"]\"):\n i += 1\n # Must use forward slash in the .json file\n line_to_add = f' \"{stub_dir}\",'.replace(\"\\\\\", \"/\")\n initial = i - 1\n while lines[i].lower() > line_to_add.lower():\n i -= 1\n if lines[i + 1].strip().rstrip(\",\") == line_to_add.strip().rstrip(\",\"):\n print(f\"{PYRIGHT_CONFIG} already up-to-date\")\n return\n if i == initial:\n # Special case: when adding to the end of the list, 
commas need tweaking\n line_to_add = line_to_add.rstrip(\",\")\n lines[i] = lines[i].rstrip() + \",\\n\"\n lines.insert(i + 1, line_to_add + \"\\n\")\n print(f\"Updating {PYRIGHT_CONFIG}\")\n with open(PYRIGHT_CONFIG, \"w\") as f:\n f.writelines(lines)\n\n\ndef main() -> None:\n parser = argparse.ArgumentParser(\n description=\"\"\"Generate baseline stubs automatically for an installed pip package\n using stubgen. Also run black and isort. If the name of\n the project is different from the runtime Python package name, you may\n need to use --package (example: --package yaml PyYAML).\"\"\"\n )\n parser.add_argument(\"project\", help=\"name of PyPI project for which to generate stubs under stubs/\")\n parser.add_argument(\"--package\", help=\"generate stubs for this Python package (default is autodetected)\")\n args = parser.parse_args()\n project = args.project\n package = args.package\n\n if not re.match(r\"[a-zA-Z0-9-_.]+$\", project):\n sys.exit(f\"Invalid character in project name: {project!r}\")\n\n if not package:\n package = project # default\n # Try to find which packages are provided by the project\n # Use default if that fails or if several packages are found\n #\n # The importlib.metadata module is used for projects whose name is different\n # from the runtime Python package name (example: PyYAML/yaml)\n if sys.version_info >= (3, 8):\n dist = distribution(project).read_text(\"top_level.txt\")\n if dist is not None:\n packages = [name for name in dist.split() if not name.startswith(\"_\")]\n if len(packages) == 1:\n package = packages[0]\n print(f'Using detected package \"{package}\" for project \"{project}\"', file=sys.stderr)\n print(\"Suggestion: Try again with --package argument if that's not what you wanted\", file=sys.stderr)\n\n if not os.path.isdir(\"stubs\") or not os.path.isdir(\"stdlib\"):\n sys.exit(\"Error: Current working directory must be the root of typeshed repository\")\n\n # Get normalized project name and version of installed package.\n info = get_installed_package_info(project)\n if info is None:\n print(f'Error: \"{project}\" is not installed', file=sys.stderr)\n print(\"\", file=sys.stderr)\n print(f'Suggestion: Run \"python3 -m pip install {project}\" and try again', file=sys.stderr)\n sys.exit(1)\n project, version = info\n\n stub_dir = os.path.join(\"stubs\", project)\n package_dir = os.path.join(stub_dir, package)\n if os.path.exists(package_dir):\n sys.exit(f\"Error: {package_dir} already exists (delete it first)\")\n\n run_stubgen(package, stub_dir)\n\n run_isort(stub_dir)\n run_black(stub_dir)\n\n create_metadata(stub_dir, version)\n\n # Since the generated stubs won't have many type annotations, we\n # have to exclude them from strict pyright checks.\n add_pyright_exclusion(stub_dir)\n\n print(\"\\nDone!\\n\\nSuggested next steps:\")\n print(f\" 1. Manually review the generated stubs in {stub_dir}\")\n print(\" 2. Optionally run tests and autofixes (see tests/README.md for details\")\n print(\" 3. Commit the changes on a new branch and create a typeshed PR (don't force-push!)\")\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "scripts/create_baseline_stubs.py"}]}
| 2,484 | 143 |
gh_patches_debug_17680
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-2151
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to fetch project updates in Up app
The current UAT build for the 3.13 release breaks the project updates download when Up data is loaded or refreshed:
https://github.com/akvo/akvo-rsr-up/issues/186
Environment:
Request Method: GET
Request URL: http://rsr.uat.akvo.org/rest/v1/project_up/2210/?format=xml&image_thumb_name=up&image_thumb_up_width=100
Django Version: 1.7.7
Python Version: 2.7.3
Installed Applications:
('nested_inline',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.humanize',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.webdesign',
'akvo.codelists',
'akvo.rsr',
'akvo.api',
'registration',
'template_utils',
'paypal.standard.ipn',
'sorl.thumbnail',
'django_counter',
'mollie.ideal',
'django_sorting',
'pagination',
'embed_video',
'django_markup',
'django_filters',
'tastypie',
'rest_framework',
'rest_framework.authtoken',
'rest_framework_swagger',
'pipeline',
'bootstrap3',
'rules',
'django_crontab',
'raven.contrib.django.raven_compat')
Installed Middleware:
('akvo.rsr.middleware.HostDispatchMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.middleware.http.ConditionalGetMiddleware',
'django_sorting.middleware.SortingMiddleware',
'pagination.middleware.PaginationMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.middleware.doc.XViewMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'akvo.rsr.middleware.ExceptionLoggingMiddleware',
'akvo.rsr.middleware.RSRVersionHeaderMiddleware',
'django_statsd.middleware.GraphiteRequestTimingMiddleware',
'django_statsd.middleware.GraphiteMiddleware',
'django_statsd.middleware.TastyPieRequestTimingMiddleware')
Traceback:
File "/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/core/handlers/base.py" in get_response
response = wrapped_callback(request, callback_args, _callback_kwargs) File "/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/rest_framework/viewsets.py" in view
return self.dispatch(request, args, *kwargs) File "/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/views/decorators/csrf.py" in wrapped_view
return view_func(args, *kwargs) File "/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/rest_framework/views.py" in dispatch
response = self.handle_exception(exc) File "/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/rest_framework/views.py" in dispatch
response = handler(request, args, *kwargs) File "/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/rest_framework/mixins.py" in retrieve
self.object = self.get_object() File "/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/rest_framework/generics.py" in get_object
queryset = self.filter_queryset(self.get_queryset()) File "/var/akvo/rsr/code/akvo/rest/views/project.py" in get_queryset
return super(ProjectViewSet, self).get_queryset() File "/var/akvo/rsr/code/akvo/rest/viewsets.py" in get_queryset
queryset = super(PublicProjectViewSet, self).get_queryset() File "/var/akvo/rsr/code/akvo/rest/viewsets.py" in get_queryset
queryset = queryset.filter(_*lookup) File "/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/query.py" in filter
return self._filter_or_exclude(False, args, *kwargs) File "/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/query.py" in _filter_or_exclude
clone.query.add_q(Q(args, *kwargs)) File "/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/sql/query.py" in add_q
clause, require_inner = self._add_q(where_part, self.used_aliases) File "/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/sql/query.py" in _add_q
current_negated=current_negated, connector=connector) File "/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/sql/query.py" in build_filter
lookups, parts, reffed_aggregate = self.solve_lookup_type(arg) File "/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/sql/query.py" in solve_lookup_type
_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) File "/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/sql/query.py" in names_to_path
self.raise_field_error(opts, name) File "/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/sql/query.py" in raise_field_error
"Choices are: %s" % (name, ", ".join(available)))
Exception Type: FieldError at /rest/v1/project_up/2210/
Exception Value: Cannot resolve keyword u'image_thumb_name' into field. Choices are: background, benchmarks, budget, budget_items, capital_spend_percentage, categories, collaboration_type, comments, conditions, contacts, country_budget_items, country_budget_vocabulary, created_at, crsadd, currency, current_image, current_image_caption, current_image_credit, current_status, custom_fields, date_end_actual, date_end_planned, date_start_actual, date_start_planned, default_aid_type, default_finance_type, default_flow_type, default_tied_status, documents, donate_button, fss, funds, funds_needed, goals, goals_overview, hierarchy, humanitarian, humanitarian_scopes, iati_activity_id, iati_checks, iati_project_exports, iati_project_import_logs, iati_project_imports, iatiexport, iatiimportjob, id, invoices, is_impact_project, is_public, keywords, language, last_modified_at, last_update, last_update_id, legacy_data, links, locations, notes, partners, partnerships, paymentgatewayselector, planned_disbursements, policy_markers, primary_location, primary_location_id, primary_organisation, primary_organisation_id, project_plan, project_plan_summary, project_scope, project_updates, publishingstatus, recipient_countries, recipient_regions, related_projects, related_to_projects, results, sectors, status, subtitle, sustainability, target_group, title, transactions, validations
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rest/viewsets.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 # Akvo RSR is covered by the GNU Affero General Public License.
4 # See more details in the license.txt file located at the root folder of the Akvo RSR module.
5 # For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
6
7 from django.db.models.fields.related import ForeignKey, ForeignObject
8
9 from akvo.rest.models import TastyTokenAuthentication
10
11 from rest_framework import authentication, filters, permissions, viewsets
12
13 from .filters import RSRGenericFilterBackend
14
15
16 class SafeMethodsPermissions(permissions.DjangoObjectPermissions):
17 """
18 Base class to allow any safe methods ('GET', 'OPTIONS' and 'HEAD') without needing to
19 authenticate.
20 """
21
22 def has_permission(self, request, view):
23 if request.method in permissions.SAFE_METHODS:
24 return True
25 return super(SafeMethodsPermissions, self).has_permission(request, view)
26
27
28 class BaseRSRViewSet(viewsets.ModelViewSet):
29 """
30 Base class used for the view sets for RSR models. Provides unified auth and perms settings.
31 """
32 authentication_classes = (authentication.SessionAuthentication, TastyTokenAuthentication, )
33 permission_classes = (SafeMethodsPermissions, )
34 filter_backends = (filters.OrderingFilter, RSRGenericFilterBackend,)
35 ordering_fields = '__all__'
36
37 def get_queryset(self):
38
39 def django_filter_filters(request):
40 """
41 Support emulating the DjangoFilterBackend-based filtering that some views used to have
42 """
43 # query string keys reserved by the RSRGenericFilterBackend
44 qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related', ]
45 # query string keys used by core DRF, OrderingFilter and Akvo custom views
46 exclude_params = ['limit', 'format', 'page', 'ordering', 'partner_type', 'sync_owner',
47 'reporting_org']
48 filters = {}
49 for key in request.QUERY_PARAMS.keys():
50 if key not in qs_params + exclude_params:
51 filters.update({key: request.QUERY_PARAMS.get(key)})
52 return filters
53
54 def get_lookups_from_filters(legacy_filters):
55 """
56 Cast the values in DjangoFilterBackend-styled query string filters to correct types to
57 be able to use them in regular queryset-filter() calls
58 """
59 # types of lookups supported by the views using DjangoFilterBackend
60 LEGACY_FIELD_LOOKUPS = ['exact', 'contains', 'icontains', 'gt', 'gte', 'lt',
61 'lte', ]
62 query_set_lookups = []
63 for key, value in legacy_filters.items():
64 parts = key.split('__')
65 if parts[-1] in LEGACY_FIELD_LOOKUPS:
66 parts = parts[:-1]
67 model = queryset.model
68 for part in parts:
69 field_object, related_model, direct, m2m = model._meta.get_field_by_name(
70 part)
71 if direct:
72 if issubclass(field_object.__class__, ForeignObject):
73 model = field_object.related.parent_model
74 else:
75 value = field_object.to_python(value)
76 break
77 else:
78 model = related_model
79 query_set_lookups += [{key: value}]
80 return query_set_lookups
81
82 queryset = super(BaseRSRViewSet, self).get_queryset()
83
84 # support for old DjangoFilterBackend-based filtering
85 # find all "old styled" filters
86 legacy_filters = django_filter_filters(self.request)
87 # create lookup dicts from the filters found
88 lookups = get_lookups_from_filters(legacy_filters)
89 for lookup in lookups:
90 queryset = queryset.filter(**lookup)
91
92 return queryset
93
94
95 class PublicProjectViewSet(BaseRSRViewSet):
96 """
97 Only public projects or objects related to public projects will be shown.
98 """
99 # project_relation is the default string for constructing a field lookup to the is_public field
100 # on the related Project. Override this in when the viewset is for a model that doesn't have a
101 # direct FK to Project or the FK field isn't named project. E.g. IndicatorViewSet:
102 # project_relation = 'result__project__'
103 # The lookup is used to filter out objects associated with private projects, see below.
104 project_relation = 'project__'
105
106 def get_queryset(self):
107
108 request = self.request
109 user = request.user
110
111 queryset = super(PublicProjectViewSet, self).get_queryset()
112
113 def projects_filter_for_non_privileged_users(user, queryset):
114 # Construct the public projects filter field lookup.
115 project_filter = self.project_relation + 'is_public'
116
117 # Filter the object list into two querysets;
118 # One where the related Projects are public and one where they are private
119 public_objects = queryset.filter(**{project_filter: True}).distinct()
120 private_objects = queryset.filter(**{project_filter: False}).distinct()
121
122 # In case of an anonymous user, only return the public objects
123 if user.is_anonymous():
124 queryset = public_objects
125
126 # Otherwise, check to which objects the user has (change) permission
127 elif private_objects:
128 permission = type(private_objects[0])._meta.db_table.replace('_', '.change_')
129 permitted_obj_pks = []
130
131 # Loop through all 'private' objects to see if the user has permission to change
132 # it. If so add its PK to the list of permitted objects.
133 for obj in private_objects:
134 if user.has_perm(permission, obj):
135 permitted_obj_pks.append(obj.pk)
136
137 queryset = public_objects | queryset.filter(pk__in=permitted_obj_pks).distinct()
138
139 return queryset
140
141 # filter projects if user is "non-privileged"
142 if user.is_anonymous() or not (user.is_superuser or user.is_admin):
143 queryset = projects_filter_for_non_privileged_users(user, queryset)
144
145 return queryset
146
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/akvo/rest/viewsets.py b/akvo/rest/viewsets.py
--- a/akvo/rest/viewsets.py
+++ b/akvo/rest/viewsets.py
@@ -44,10 +44,10 @@
qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related', ]
# query string keys used by core DRF, OrderingFilter and Akvo custom views
exclude_params = ['limit', 'format', 'page', 'ordering', 'partner_type', 'sync_owner',
- 'reporting_org']
+ 'reporting_org', ]
filters = {}
for key in request.QUERY_PARAMS.keys():
- if key not in qs_params + exclude_params:
+ if key not in qs_params + exclude_params and not key.startswith('image_thumb_'):
filters.update({key: request.QUERY_PARAMS.get(key)})
return filters
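
To see the effect of this change in isolation, here is a small standalone sketch. The parameter lists are copied from the view code above, while the query dictionary is a made-up example modelled on the failing request URL in the issue; it is not part of the actual patch.

```
# Illustrative only: emulates the filter-building loop before and after the fix.
qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related']
exclude_params = ['limit', 'format', 'page', 'ordering', 'partner_type', 'sync_owner',
                  'reporting_org']
query = {'format': 'xml', 'image_thumb_name': 'up', 'image_thumb_up_width': '100'}

# Before the patch, every unrecognised key became a queryset filter, so the
# image_thumb_* keys reached queryset.filter() and triggered the FieldError.
legacy = {k: v for k, v in query.items() if k not in qs_params + exclude_params}
assert legacy == {'image_thumb_name': 'up', 'image_thumb_up_width': '100'}

# After the patch, thumbnail parameters are skipped entirely.
fixed = {k: v for k, v in query.items()
         if k not in qs_params + exclude_params and not k.startswith('image_thumb_')}
assert fixed == {}
```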
|
{"golden_diff": "diff --git a/akvo/rest/viewsets.py b/akvo/rest/viewsets.py\n--- a/akvo/rest/viewsets.py\n+++ b/akvo/rest/viewsets.py\n@@ -44,10 +44,10 @@\n qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related', ]\n # query string keys used by core DRF, OrderingFilter and Akvo custom views\n exclude_params = ['limit', 'format', 'page', 'ordering', 'partner_type', 'sync_owner',\n- 'reporting_org']\n+ 'reporting_org', ]\n filters = {}\n for key in request.QUERY_PARAMS.keys():\n- if key not in qs_params + exclude_params:\n+ if key not in qs_params + exclude_params and not key.startswith('image_thumb_'):\n filters.update({key: request.QUERY_PARAMS.get(key)})\n return filters\n", "issue": "Unable to fetch project updates in Up app\nThe current UAT build for the 3.13 release breaks the project updates download when Up data is loaded or refreshed:\n\nhttps://github.com/akvo/akvo-rsr-up/issues/186\n\nEnvironment:\n\nRequest Method: GET\nRequest URL: http://rsr.uat.akvo.org/rest/v1/project_up/2210/?format=xml&image_thumb_name=up&image_thumb_up_width=100\n\nDjango Version: 1.7.7\nPython Version: 2.7.3\nInstalled Applications:\n('nested_inline',\n'django.contrib.admin',\n'django.contrib.auth',\n'django.contrib.contenttypes',\n'django.contrib.humanize',\n'django.contrib.messages',\n'django.contrib.sessions',\n'django.contrib.staticfiles',\n'django.contrib.webdesign',\n'akvo.codelists',\n'akvo.rsr',\n'akvo.api',\n'registration',\n'template_utils',\n'paypal.standard.ipn',\n'sorl.thumbnail',\n'django_counter',\n'mollie.ideal',\n'django_sorting',\n'pagination',\n'embed_video',\n'django_markup',\n'django_filters',\n'tastypie',\n'rest_framework',\n'rest_framework.authtoken',\n'rest_framework_swagger',\n'pipeline',\n'bootstrap3',\n'rules',\n'django_crontab',\n'raven.contrib.django.raven_compat')\nInstalled Middleware:\n('akvo.rsr.middleware.HostDispatchMiddleware',\n'django.contrib.sessions.middleware.SessionMiddleware',\n'django.middleware.locale.LocaleMiddleware',\n'django.middleware.csrf.CsrfViewMiddleware',\n'django.middleware.http.ConditionalGetMiddleware',\n'django_sorting.middleware.SortingMiddleware',\n'pagination.middleware.PaginationMiddleware',\n'django.middleware.common.CommonMiddleware',\n'django.contrib.auth.middleware.AuthenticationMiddleware',\n'django.middleware.doc.XViewMiddleware',\n'django.contrib.messages.middleware.MessageMiddleware',\n'akvo.rsr.middleware.ExceptionLoggingMiddleware',\n'akvo.rsr.middleware.RSRVersionHeaderMiddleware',\n'django_statsd.middleware.GraphiteRequestTimingMiddleware',\n'django_statsd.middleware.GraphiteMiddleware',\n'django_statsd.middleware.TastyPieRequestTimingMiddleware')\n\nTraceback:\nFile \"/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/core/handlers/base.py\" in get_response\n\nresponse = wrapped_callback(request, callback_args, _callback_kwargs) File \"/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/rest_framework/viewsets.py\" in view\nreturn self.dispatch(request, args, *kwargs) File \"/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/views/decorators/csrf.py\" in wrapped_view\nreturn view_func(args, *kwargs) File \"/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/rest_framework/views.py\" in dispatch\nresponse = self.handle_exception(exc) File \"/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/rest_framework/views.py\" in dispatch\nresponse = 
handler(request, args, *kwargs) File \"/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/rest_framework/mixins.py\" in retrieve\nself.object = self.get_object() File \"/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/rest_framework/generics.py\" in get_object\nqueryset = self.filter_queryset(self.get_queryset()) File \"/var/akvo/rsr/code/akvo/rest/views/project.py\" in get_queryset\nreturn super(ProjectViewSet, self).get_queryset() File \"/var/akvo/rsr/code/akvo/rest/viewsets.py\" in get_queryset\nqueryset = super(PublicProjectViewSet, self).get_queryset() File \"/var/akvo/rsr/code/akvo/rest/viewsets.py\" in get_queryset\nqueryset = queryset.filter(_*lookup) File \"/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/query.py\" in filter\nreturn self._filter_or_exclude(False, args, *kwargs) File \"/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/query.py\" in _filter_or_exclude\nclone.query.add_q(Q(args, *kwargs)) File \"/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/sql/query.py\" in add_q\nclause, require_inner = self._add_q(where_part, self.used_aliases) File \"/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/sql/query.py\" in _add_q\ncurrent_negated=current_negated, connector=connector) File \"/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/sql/query.py\" in build_filter\nlookups, parts, reffed_aggregate = self.solve_lookup_type(arg) File \"/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/sql/query.py\" in solve_lookup_type\n_, field, _, lookup_parts = self.names_to_path(lookup_splitted, self.get_meta()) File \"/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/sql/query.py\" in names_to_path\nself.raise_field_error(opts, name) File \"/var/akvo/rsr/versions/deploy-RSR_Deploy-251/venv/local/lib/python2.7/site-packages/django/db/models/sql/query.py\" in raise_field_error\n\"Choices are: %s\" % (name, \", \".join(available)))\nException Type: FieldError at /rest/v1/project_up/2210/\nException Value: Cannot resolve keyword u'image_thumb_name' into field. 
Choices are: background, benchmarks, budget, budget_items, capital_spend_percentage, categories, collaboration_type, comments, conditions, contacts, country_budget_items, country_budget_vocabulary, created_at, crsadd, currency, current_image, current_image_caption, current_image_credit, current_status, custom_fields, date_end_actual, date_end_planned, date_start_actual, date_start_planned, default_aid_type, default_finance_type, default_flow_type, default_tied_status, documents, donate_button, fss, funds, funds_needed, goals, goals_overview, hierarchy, humanitarian, humanitarian_scopes, iati_activity_id, iati_checks, iati_project_exports, iati_project_import_logs, iati_project_imports, iatiexport, iatiimportjob, id, invoices, is_impact_project, is_public, keywords, language, last_modified_at, last_update, last_update_id, legacy_data, links, locations, notes, partners, partnerships, paymentgatewayselector, planned_disbursements, policy_markers, primary_location, primary_location_id, primary_organisation, primary_organisation_id, project_plan, project_plan_summary, project_scope, project_updates, publishingstatus, recipient_countries, recipient_regions, related_projects, related_to_projects, results, sectors, status, subtitle, sustainability, target_group, title, transactions, validations\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db.models.fields.related import ForeignKey, ForeignObject\n\nfrom akvo.rest.models import TastyTokenAuthentication\n\nfrom rest_framework import authentication, filters, permissions, viewsets\n\nfrom .filters import RSRGenericFilterBackend\n\n\nclass SafeMethodsPermissions(permissions.DjangoObjectPermissions):\n \"\"\"\n Base class to allow any safe methods ('GET', 'OPTIONS' and 'HEAD') without needing to\n authenticate.\n \"\"\"\n\n def has_permission(self, request, view):\n if request.method in permissions.SAFE_METHODS:\n return True\n return super(SafeMethodsPermissions, self).has_permission(request, view)\n\n\nclass BaseRSRViewSet(viewsets.ModelViewSet):\n \"\"\"\n Base class used for the view sets for RSR models. 
Provides unified auth and perms settings.\n \"\"\"\n authentication_classes = (authentication.SessionAuthentication, TastyTokenAuthentication, )\n permission_classes = (SafeMethodsPermissions, )\n filter_backends = (filters.OrderingFilter, RSRGenericFilterBackend,)\n ordering_fields = '__all__'\n\n def get_queryset(self):\n\n def django_filter_filters(request):\n \"\"\"\n Support emulating the DjangoFilterBackend-based filtering that some views used to have\n \"\"\"\n # query string keys reserved by the RSRGenericFilterBackend\n qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related', ]\n # query string keys used by core DRF, OrderingFilter and Akvo custom views\n exclude_params = ['limit', 'format', 'page', 'ordering', 'partner_type', 'sync_owner',\n 'reporting_org']\n filters = {}\n for key in request.QUERY_PARAMS.keys():\n if key not in qs_params + exclude_params:\n filters.update({key: request.QUERY_PARAMS.get(key)})\n return filters\n\n def get_lookups_from_filters(legacy_filters):\n \"\"\"\n Cast the values in DjangoFilterBackend-styled query string filters to correct types to\n be able to use them in regular queryset-filter() calls\n \"\"\"\n # types of lookups supported by the views using DjangoFilterBackend\n LEGACY_FIELD_LOOKUPS = ['exact', 'contains', 'icontains', 'gt', 'gte', 'lt',\n 'lte', ]\n query_set_lookups = []\n for key, value in legacy_filters.items():\n parts = key.split('__')\n if parts[-1] in LEGACY_FIELD_LOOKUPS:\n parts = parts[:-1]\n model = queryset.model\n for part in parts:\n field_object, related_model, direct, m2m = model._meta.get_field_by_name(\n part)\n if direct:\n if issubclass(field_object.__class__, ForeignObject):\n model = field_object.related.parent_model\n else:\n value = field_object.to_python(value)\n break\n else:\n model = related_model\n query_set_lookups += [{key: value}]\n return query_set_lookups\n\n queryset = super(BaseRSRViewSet, self).get_queryset()\n\n # support for old DjangoFilterBackend-based filtering\n # find all \"old styled\" filters\n legacy_filters = django_filter_filters(self.request)\n # create lookup dicts from the filters found\n lookups = get_lookups_from_filters(legacy_filters)\n for lookup in lookups:\n queryset = queryset.filter(**lookup)\n\n return queryset\n\n\nclass PublicProjectViewSet(BaseRSRViewSet):\n \"\"\"\n Only public projects or objects related to public projects will be shown.\n \"\"\"\n # project_relation is the default string for constructing a field lookup to the is_public field\n # on the related Project. Override this in when the viewset is for a model that doesn't have a\n # direct FK to Project or the FK field isn't named project. E.g. 
IndicatorViewSet:\n # project_relation = 'result__project__'\n # The lookup is used to filter out objects associated with private projects, see below.\n project_relation = 'project__'\n\n def get_queryset(self):\n\n request = self.request\n user = request.user\n\n queryset = super(PublicProjectViewSet, self).get_queryset()\n\n def projects_filter_for_non_privileged_users(user, queryset):\n # Construct the public projects filter field lookup.\n project_filter = self.project_relation + 'is_public'\n\n # Filter the object list into two querysets;\n # One where the related Projects are public and one where they are private\n public_objects = queryset.filter(**{project_filter: True}).distinct()\n private_objects = queryset.filter(**{project_filter: False}).distinct()\n\n # In case of an anonymous user, only return the public objects\n if user.is_anonymous():\n queryset = public_objects\n\n # Otherwise, check to which objects the user has (change) permission\n elif private_objects:\n permission = type(private_objects[0])._meta.db_table.replace('_', '.change_')\n permitted_obj_pks = []\n\n # Loop through all 'private' objects to see if the user has permission to change\n # it. If so add its PK to the list of permitted objects.\n for obj in private_objects:\n if user.has_perm(permission, obj):\n permitted_obj_pks.append(obj.pk)\n\n queryset = public_objects | queryset.filter(pk__in=permitted_obj_pks).distinct()\n\n return queryset\n\n # filter projects if user is \"non-privileged\"\n if user.is_anonymous() or not (user.is_superuser or user.is_admin):\n queryset = projects_filter_for_non_privileged_users(user, queryset)\n\n return queryset\n", "path": "akvo/rest/viewsets.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db.models.fields.related import ForeignKey, ForeignObject\n\nfrom akvo.rest.models import TastyTokenAuthentication\n\nfrom rest_framework import authentication, filters, permissions, viewsets\n\nfrom .filters import RSRGenericFilterBackend\n\n\nclass SafeMethodsPermissions(permissions.DjangoObjectPermissions):\n \"\"\"\n Base class to allow any safe methods ('GET', 'OPTIONS' and 'HEAD') without needing to\n authenticate.\n \"\"\"\n\n def has_permission(self, request, view):\n if request.method in permissions.SAFE_METHODS:\n return True\n return super(SafeMethodsPermissions, self).has_permission(request, view)\n\n\nclass BaseRSRViewSet(viewsets.ModelViewSet):\n \"\"\"\n Base class used for the view sets for RSR models. 
Provides unified auth and perms settings.\n \"\"\"\n authentication_classes = (authentication.SessionAuthentication, TastyTokenAuthentication, )\n permission_classes = (SafeMethodsPermissions, )\n filter_backends = (filters.OrderingFilter, RSRGenericFilterBackend,)\n ordering_fields = '__all__'\n\n def get_queryset(self):\n\n def django_filter_filters(request):\n \"\"\"\n Support emulating the DjangoFilterBackend-based filtering that some views used to have\n \"\"\"\n # query string keys reserved by the RSRGenericFilterBackend\n qs_params = ['filter', 'exclude', 'select_related', 'prefetch_related', ]\n # query string keys used by core DRF, OrderingFilter and Akvo custom views\n exclude_params = ['limit', 'format', 'page', 'ordering', 'partner_type', 'sync_owner',\n 'reporting_org', ]\n filters = {}\n for key in request.QUERY_PARAMS.keys():\n if key not in qs_params + exclude_params and not key.startswith('image_thumb_'):\n filters.update({key: request.QUERY_PARAMS.get(key)})\n return filters\n\n def get_lookups_from_filters(legacy_filters):\n \"\"\"\n Cast the values in DjangoFilterBackend-styled query string filters to correct types to\n be able to use them in regular queryset-filter() calls\n \"\"\"\n # types of lookups supported by the views using DjangoFilterBackend\n LEGACY_FIELD_LOOKUPS = ['exact', 'contains', 'icontains', 'gt', 'gte', 'lt',\n 'lte', ]\n query_set_lookups = []\n for key, value in legacy_filters.items():\n parts = key.split('__')\n if parts[-1] in LEGACY_FIELD_LOOKUPS:\n parts = parts[:-1]\n model = queryset.model\n for part in parts:\n field_object, related_model, direct, m2m = model._meta.get_field_by_name(\n part)\n if direct:\n if issubclass(field_object.__class__, ForeignObject):\n model = field_object.related.parent_model\n else:\n value = field_object.to_python(value)\n break\n else:\n model = related_model\n query_set_lookups += [{key: value}]\n return query_set_lookups\n\n queryset = super(BaseRSRViewSet, self).get_queryset()\n\n # support for old DjangoFilterBackend-based filtering\n # find all \"old styled\" filters\n legacy_filters = django_filter_filters(self.request)\n # create lookup dicts from the filters found\n lookups = get_lookups_from_filters(legacy_filters)\n for lookup in lookups:\n queryset = queryset.filter(**lookup)\n\n return queryset\n\n\nclass PublicProjectViewSet(BaseRSRViewSet):\n \"\"\"\n Only public projects or objects related to public projects will be shown.\n \"\"\"\n # project_relation is the default string for constructing a field lookup to the is_public field\n # on the related Project. Override this in when the viewset is for a model that doesn't have a\n # direct FK to Project or the FK field isn't named project. E.g. 
IndicatorViewSet:\n # project_relation = 'result__project__'\n # The lookup is used to filter out objects associated with private projects, see below.\n project_relation = 'project__'\n\n def get_queryset(self):\n\n request = self.request\n user = request.user\n\n queryset = super(PublicProjectViewSet, self).get_queryset()\n\n def projects_filter_for_non_privileged_users(user, queryset):\n # Construct the public projects filter field lookup.\n project_filter = self.project_relation + 'is_public'\n\n # Filter the object list into two querysets;\n # One where the related Projects are public and one where they are private\n public_objects = queryset.filter(**{project_filter: True}).distinct()\n private_objects = queryset.filter(**{project_filter: False}).distinct()\n\n # In case of an anonymous user, only return the public objects\n if user.is_anonymous():\n queryset = public_objects\n\n # Otherwise, check to which objects the user has (change) permission\n elif private_objects:\n permission = type(private_objects[0])._meta.db_table.replace('_', '.change_')\n permitted_obj_pks = []\n\n # Loop through all 'private' objects to see if the user has permission to change\n # it. If so add its PK to the list of permitted objects.\n for obj in private_objects:\n if user.has_perm(permission, obj):\n permitted_obj_pks.append(obj.pk)\n\n queryset = public_objects | queryset.filter(pk__in=permitted_obj_pks).distinct()\n\n return queryset\n\n # filter projects if user is \"non-privileged\"\n if user.is_anonymous() or not (user.is_superuser or user.is_admin):\n queryset = projects_filter_for_non_privileged_users(user, queryset)\n\n return queryset\n", "path": "akvo/rest/viewsets.py"}]}
| 3,504 | 195 |
gh_patches_debug_14030 | rasdani/github-patches | git_diff | DataDog__dd-trace-py-5182 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Intermittent `RuntimeError: the memalloc module was not started` error
### Which version of dd-trace-py are you using?
`ddtrace==0.57.0`
### What is the result that you get?
`RuntimeError: the memalloc module was not started`

### What is the result that you expected?
No errors.
This seems to be happening a few times a day.
We have tried setting `DD_PROFILING_HEAP_ENABLED=False` and `DD_PROFILING_MEMALLOC=0` in the environment, but the errors continue to appear.
Configuration in Django:
```
import os
from ddtrace import config, tracer
# DataDog Setup
tracer.configure(hostname=os.environ.get("HOST_IP"))
tracer.configure(enabled=True)
tracer.set_tags(
{"env": os.environ.get("ENVIRONMENT"), "namespace": os.environ.get("NAMESPACE")}
)
config.django["analytics_enabled"] = True
config.django["cache_service_name"] = "xxx-cache"
config.django["database_service_name_prefix"] = "xxx"
config.django["distributed_tracing_enabled"] = True
config.django["instrument_middleware"] = True
config.django["service_name"] = "xxx"
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ddtrace/profiling/collector/memalloc.py`
Content:
```
1 # -*- encoding: utf-8 -*-
2 import logging
3 import math
4 import os
5 import threading
6 import typing
7
8 import attr
9
10
11 try:
12 from ddtrace.profiling.collector import _memalloc
13 except ImportError:
14 _memalloc = None # type: ignore[assignment]
15
16 from ddtrace.internal.utils import attr as attr_utils
17 from ddtrace.internal.utils import formats
18 from ddtrace.profiling import _threading
19 from ddtrace.profiling import collector
20 from ddtrace.profiling import event
21
22
23 LOG = logging.getLogger(__name__)
24
25
26 @event.event_class
27 class MemoryAllocSampleEvent(event.StackBasedEvent):
28 """A sample storing memory allocation tracked."""
29
30 size = attr.ib(default=0, type=int)
31 """Allocation size in bytes."""
32
33 capture_pct = attr.ib(default=None, type=float)
34 """The capture percentage."""
35
36 nevents = attr.ib(default=0, type=int)
37 """The total number of allocation events sampled."""
38
39
40 @event.event_class
41 class MemoryHeapSampleEvent(event.StackBasedEvent):
42 """A sample storing memory allocation tracked."""
43
44 size = attr.ib(default=0, type=int)
45 """Allocation size in bytes."""
46
47 sample_size = attr.ib(default=0, type=int)
48 """The sampling size."""
49
50
51 def _get_default_heap_sample_size(
52 default_heap_sample_size=1024 * 1024, # type: int
53 ):
54 # type: (...) -> int
55 heap_sample_size = os.environ.get("DD_PROFILING_HEAP_SAMPLE_SIZE")
56 if heap_sample_size is not None:
57 return int(heap_sample_size)
58
59 if not formats.asbool(os.environ.get("DD_PROFILING_HEAP_ENABLED", "1")):
60 return 0
61
62 try:
63 from ddtrace.vendor import psutil
64
65 total_mem = psutil.swap_memory().total + psutil.virtual_memory().total
66 except Exception:
67 LOG.warning(
68 "Unable to get total memory available, using default value of %d KB",
69 default_heap_sample_size / 1024,
70 exc_info=True,
71 )
72 return default_heap_sample_size
73
74 # This is TRACEBACK_ARRAY_MAX_COUNT
75 max_samples = 2 ** 16
76
77 return max(math.ceil(total_mem / max_samples), default_heap_sample_size)
78
79
80 @attr.s
81 class MemoryCollector(collector.PeriodicCollector):
82 """Memory allocation collector."""
83
84 _DEFAULT_MAX_EVENTS = 16
85 _DEFAULT_INTERVAL = 0.5
86
87 # Arbitrary interval to empty the _memalloc event buffer
88 _interval = attr.ib(default=_DEFAULT_INTERVAL, repr=False)
89
90 # TODO make this dynamic based on the 1. interval and 2. the max number of events allowed in the Recorder
91 _max_events = attr.ib(
92 factory=attr_utils.from_env(
93 "_DD_PROFILING_MEMORY_EVENTS_BUFFER",
94 _DEFAULT_MAX_EVENTS,
95 int,
96 )
97 )
98 max_nframe = attr.ib(factory=attr_utils.from_env("DD_PROFILING_MAX_FRAMES", 64, int))
99 heap_sample_size = attr.ib(type=int, factory=_get_default_heap_sample_size)
100 ignore_profiler = attr.ib(factory=attr_utils.from_env("DD_PROFILING_IGNORE_PROFILER", False, formats.asbool))
101
102 def _start_service(self):
103 # type: (...) -> None
104 """Start collecting memory profiles."""
105 if _memalloc is None:
106 raise collector.CollectorUnavailable
107
108 _memalloc.start(self.max_nframe, self._max_events, self.heap_sample_size)
109
110 super(MemoryCollector, self)._start_service()
111
112 def _stop_service(self):
113 # type: (...) -> None
114 super(MemoryCollector, self)._stop_service()
115
116 if _memalloc is not None:
117 try:
118 _memalloc.stop()
119 except RuntimeError:
120 pass
121
122 def _get_thread_id_ignore_set(self):
123 # type: () -> typing.Set[int]
124 # This method is not perfect and prone to race condition in theory, but very little in practice.
125 # Anyhow it's not a big deal — it's a best effort feature.
126 return {
127 thread.ident
128 for thread in threading.enumerate()
129 if getattr(thread, "_ddtrace_profiling_ignore", False) and thread.ident is not None
130 }
131
132 def snapshot(self):
133 thread_id_ignore_set = self._get_thread_id_ignore_set()
134 return (
135 tuple(
136 MemoryHeapSampleEvent(
137 thread_id=thread_id,
138 thread_name=_threading.get_thread_name(thread_id),
139 thread_native_id=_threading.get_thread_native_id(thread_id),
140 frames=stack,
141 nframes=nframes,
142 size=size,
143 sample_size=self.heap_sample_size,
144 )
145 for (stack, nframes, thread_id), size in _memalloc.heap()
146 if not self.ignore_profiler or thread_id not in thread_id_ignore_set
147 ),
148 )
149
150 def collect(self):
151 events, count, alloc_count = _memalloc.iter_events()
152 capture_pct = 100 * count / alloc_count
153 thread_id_ignore_set = self._get_thread_id_ignore_set()
154 # TODO: The event timestamp is slightly off since it's going to be the time we copy the data from the
155 # _memalloc buffer to our Recorder. This is fine for now, but we might want to store the nanoseconds
156 # timestamp in C and then return it via iter_events.
157 return (
158 tuple(
159 MemoryAllocSampleEvent(
160 thread_id=thread_id,
161 thread_name=_threading.get_thread_name(thread_id),
162 thread_native_id=_threading.get_thread_native_id(thread_id),
163 frames=stack,
164 nframes=nframes,
165 size=size,
166 capture_pct=capture_pct,
167 nevents=alloc_count,
168 )
169 for (stack, nframes, thread_id), size, domain in events
170 if not self.ignore_profiler or thread_id not in thread_id_ignore_set
171 ),
172 )
173
```
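
A side note on the sampling arithmetic in `_get_default_heap_sample_size` above: the machine sizes below are illustrative values chosen by the editor, not figures taken from the issue.

```
# ceil(total_mem / 2**16) versus the 1 MiB floor, for two example machine sizes.
import math

default = 1024 * 1024        # 1 MiB floor
max_samples = 2 ** 16        # TRACEBACK_ARRAY_MAX_COUNT

for total_mem in (16 * 2 ** 30, 128 * 2 ** 30):   # 16 GiB and 128 GiB of RAM + swap
    sample = max(math.ceil(total_mem / max_samples), default)
    print(total_mem // 2 ** 30, "GiB ->", sample, "bytes per sample")
# 16 GiB -> 1048576 bytes per sample (the floor wins)
# 128 GiB -> 2097152 bytes per sample
```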
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ddtrace/profiling/collector/memalloc.py b/ddtrace/profiling/collector/memalloc.py
--- a/ddtrace/profiling/collector/memalloc.py
+++ b/ddtrace/profiling/collector/memalloc.py
@@ -148,7 +148,13 @@
)
def collect(self):
- events, count, alloc_count = _memalloc.iter_events()
+ try:
+ events, count, alloc_count = _memalloc.iter_events()
+ except RuntimeError:
+ # DEV: This can happen if either _memalloc has not been started or has been stopped.
+ LOG.debug("Unable to collect memory events from process %d", os.getpid(), exc_info=True)
+ return tuple()
+
capture_pct = 100 * count / alloc_count
thread_id_ignore_set = self._get_thread_id_ignore_set()
# TODO: The event timestamp is slightly off since it's going to be the time we copy the data from the
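
For illustration, the same defensive pattern in isolation. The `iter_events()` name and the RuntimeError behaviour come from the module quoted above; the wrapper function, its return shape, and the logger setup are the editor's own, not part of ddtrace.

```
# Sketch: if memalloc was never started (or was already stopped), report an
# empty result instead of letting RuntimeError escape the collector.
import logging
import os

LOG = logging.getLogger(__name__)


def safe_iter_events(memalloc_module):
    try:
        return memalloc_module.iter_events()
    except RuntimeError:
        LOG.debug("Unable to collect memory events from process %d",
                  os.getpid(), exc_info=True)
        return (), 0, 0
```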
|
{"golden_diff": "diff --git a/ddtrace/profiling/collector/memalloc.py b/ddtrace/profiling/collector/memalloc.py\n--- a/ddtrace/profiling/collector/memalloc.py\n+++ b/ddtrace/profiling/collector/memalloc.py\n@@ -148,7 +148,13 @@\n )\n \n def collect(self):\n- events, count, alloc_count = _memalloc.iter_events()\n+ try:\n+ events, count, alloc_count = _memalloc.iter_events()\n+ except RuntimeError:\n+ # DEV: This can happen if either _memalloc has not been started or has been stopped.\n+ LOG.debug(\"Unable to collect memory events from process %d\", os.getpid(), exc_info=True)\n+ return tuple()\n+\n capture_pct = 100 * count / alloc_count\n thread_id_ignore_set = self._get_thread_id_ignore_set()\n # TODO: The event timestamp is slightly off since it's going to be the time we copy the data from the\n", "issue": "Intermittent `RuntimeError: the memalloc module was not started` error\n### Which version of dd-trace-py are you using?\r\n\r\n`ddtrace==0.57.0`\r\n\r\n### What is the result that you get?\r\n\r\n`RuntimeError: the memalloc module was not started`\r\n\r\n\r\n\r\n### What is the result that you expected?\r\n\r\nNo errors.\r\n\r\nThis seems to be happening a few times a day.\r\n\r\nWe have tried setting `DD_PROFILING_HEAP_ENABLED=False` and `DD_PROFILING_MEMALLOC=0` in the environment, but the errors continue to appear.\r\n\r\n\r\nConfiguration in Django:\r\n```\r\nimport os\r\nfrom ddtrace import config, tracer\r\n\r\n# DataDog Setup\r\ntracer.configure(hostname=os.environ.get(\"HOST_IP\"))\r\ntracer.configure(enabled=True)\r\ntracer.set_tags(\r\n {\"env\": os.environ.get(\"ENVIRONMENT\"), \"namespace\": os.environ.get(\"NAMESPACE\")}\r\n)\r\nconfig.django[\"analytics_enabled\"] = True\r\nconfig.django[\"cache_service_name\"] = \"xxx-cache\"\r\nconfig.django[\"database_service_name_prefix\"] = \"xxx\"\r\nconfig.django[\"distributed_tracing_enabled\"] = True\r\nconfig.django[\"instrument_middleware\"] = True\r\nconfig.django[\"service_name\"] = \"xxx\"\r\n\r\n```\r\n\n", "before_files": [{"content": "# -*- encoding: utf-8 -*-\nimport logging\nimport math\nimport os\nimport threading\nimport typing\n\nimport attr\n\n\ntry:\n from ddtrace.profiling.collector import _memalloc\nexcept ImportError:\n _memalloc = None # type: ignore[assignment]\n\nfrom ddtrace.internal.utils import attr as attr_utils\nfrom ddtrace.internal.utils import formats\nfrom ddtrace.profiling import _threading\nfrom ddtrace.profiling import collector\nfrom ddtrace.profiling import event\n\n\nLOG = logging.getLogger(__name__)\n\n\[email protected]_class\nclass MemoryAllocSampleEvent(event.StackBasedEvent):\n \"\"\"A sample storing memory allocation tracked.\"\"\"\n\n size = attr.ib(default=0, type=int)\n \"\"\"Allocation size in bytes.\"\"\"\n\n capture_pct = attr.ib(default=None, type=float)\n \"\"\"The capture percentage.\"\"\"\n\n nevents = attr.ib(default=0, type=int)\n \"\"\"The total number of allocation events sampled.\"\"\"\n\n\[email protected]_class\nclass MemoryHeapSampleEvent(event.StackBasedEvent):\n \"\"\"A sample storing memory allocation tracked.\"\"\"\n\n size = attr.ib(default=0, type=int)\n \"\"\"Allocation size in bytes.\"\"\"\n\n sample_size = attr.ib(default=0, type=int)\n \"\"\"The sampling size.\"\"\"\n\n\ndef _get_default_heap_sample_size(\n default_heap_sample_size=1024 * 1024, # type: int\n):\n # type: (...) 
-> int\n heap_sample_size = os.environ.get(\"DD_PROFILING_HEAP_SAMPLE_SIZE\")\n if heap_sample_size is not None:\n return int(heap_sample_size)\n\n if not formats.asbool(os.environ.get(\"DD_PROFILING_HEAP_ENABLED\", \"1\")):\n return 0\n\n try:\n from ddtrace.vendor import psutil\n\n total_mem = psutil.swap_memory().total + psutil.virtual_memory().total\n except Exception:\n LOG.warning(\n \"Unable to get total memory available, using default value of %d KB\",\n default_heap_sample_size / 1024,\n exc_info=True,\n )\n return default_heap_sample_size\n\n # This is TRACEBACK_ARRAY_MAX_COUNT\n max_samples = 2 ** 16\n\n return max(math.ceil(total_mem / max_samples), default_heap_sample_size)\n\n\[email protected]\nclass MemoryCollector(collector.PeriodicCollector):\n \"\"\"Memory allocation collector.\"\"\"\n\n _DEFAULT_MAX_EVENTS = 16\n _DEFAULT_INTERVAL = 0.5\n\n # Arbitrary interval to empty the _memalloc event buffer\n _interval = attr.ib(default=_DEFAULT_INTERVAL, repr=False)\n\n # TODO make this dynamic based on the 1. interval and 2. the max number of events allowed in the Recorder\n _max_events = attr.ib(\n factory=attr_utils.from_env(\n \"_DD_PROFILING_MEMORY_EVENTS_BUFFER\",\n _DEFAULT_MAX_EVENTS,\n int,\n )\n )\n max_nframe = attr.ib(factory=attr_utils.from_env(\"DD_PROFILING_MAX_FRAMES\", 64, int))\n heap_sample_size = attr.ib(type=int, factory=_get_default_heap_sample_size)\n ignore_profiler = attr.ib(factory=attr_utils.from_env(\"DD_PROFILING_IGNORE_PROFILER\", False, formats.asbool))\n\n def _start_service(self):\n # type: (...) -> None\n \"\"\"Start collecting memory profiles.\"\"\"\n if _memalloc is None:\n raise collector.CollectorUnavailable\n\n _memalloc.start(self.max_nframe, self._max_events, self.heap_sample_size)\n\n super(MemoryCollector, self)._start_service()\n\n def _stop_service(self):\n # type: (...) -> None\n super(MemoryCollector, self)._stop_service()\n\n if _memalloc is not None:\n try:\n _memalloc.stop()\n except RuntimeError:\n pass\n\n def _get_thread_id_ignore_set(self):\n # type: () -> typing.Set[int]\n # This method is not perfect and prone to race condition in theory, but very little in practice.\n # Anyhow it's not a big deal \u2014 it's a best effort feature.\n return {\n thread.ident\n for thread in threading.enumerate()\n if getattr(thread, \"_ddtrace_profiling_ignore\", False) and thread.ident is not None\n }\n\n def snapshot(self):\n thread_id_ignore_set = self._get_thread_id_ignore_set()\n return (\n tuple(\n MemoryHeapSampleEvent(\n thread_id=thread_id,\n thread_name=_threading.get_thread_name(thread_id),\n thread_native_id=_threading.get_thread_native_id(thread_id),\n frames=stack,\n nframes=nframes,\n size=size,\n sample_size=self.heap_sample_size,\n )\n for (stack, nframes, thread_id), size in _memalloc.heap()\n if not self.ignore_profiler or thread_id not in thread_id_ignore_set\n ),\n )\n\n def collect(self):\n events, count, alloc_count = _memalloc.iter_events()\n capture_pct = 100 * count / alloc_count\n thread_id_ignore_set = self._get_thread_id_ignore_set()\n # TODO: The event timestamp is slightly off since it's going to be the time we copy the data from the\n # _memalloc buffer to our Recorder. 
This is fine for now, but we might want to store the nanoseconds\n # timestamp in C and then return it via iter_events.\n return (\n tuple(\n MemoryAllocSampleEvent(\n thread_id=thread_id,\n thread_name=_threading.get_thread_name(thread_id),\n thread_native_id=_threading.get_thread_native_id(thread_id),\n frames=stack,\n nframes=nframes,\n size=size,\n capture_pct=capture_pct,\n nevents=alloc_count,\n )\n for (stack, nframes, thread_id), size, domain in events\n if not self.ignore_profiler or thread_id not in thread_id_ignore_set\n ),\n )\n", "path": "ddtrace/profiling/collector/memalloc.py"}], "after_files": [{"content": "# -*- encoding: utf-8 -*-\nimport logging\nimport math\nimport os\nimport threading\nimport typing\n\nimport attr\n\n\ntry:\n from ddtrace.profiling.collector import _memalloc\nexcept ImportError:\n _memalloc = None # type: ignore[assignment]\n\nfrom ddtrace.internal.utils import attr as attr_utils\nfrom ddtrace.internal.utils import formats\nfrom ddtrace.profiling import _threading\nfrom ddtrace.profiling import collector\nfrom ddtrace.profiling import event\n\n\nLOG = logging.getLogger(__name__)\n\n\[email protected]_class\nclass MemoryAllocSampleEvent(event.StackBasedEvent):\n \"\"\"A sample storing memory allocation tracked.\"\"\"\n\n size = attr.ib(default=0, type=int)\n \"\"\"Allocation size in bytes.\"\"\"\n\n capture_pct = attr.ib(default=None, type=float)\n \"\"\"The capture percentage.\"\"\"\n\n nevents = attr.ib(default=0, type=int)\n \"\"\"The total number of allocation events sampled.\"\"\"\n\n\[email protected]_class\nclass MemoryHeapSampleEvent(event.StackBasedEvent):\n \"\"\"A sample storing memory allocation tracked.\"\"\"\n\n size = attr.ib(default=0, type=int)\n \"\"\"Allocation size in bytes.\"\"\"\n\n sample_size = attr.ib(default=0, type=int)\n \"\"\"The sampling size.\"\"\"\n\n\ndef _get_default_heap_sample_size(\n default_heap_sample_size=1024 * 1024, # type: int\n):\n # type: (...) -> int\n heap_sample_size = os.environ.get(\"DD_PROFILING_HEAP_SAMPLE_SIZE\")\n if heap_sample_size is not None:\n return int(heap_sample_size)\n\n if not formats.asbool(os.environ.get(\"DD_PROFILING_HEAP_ENABLED\", \"1\")):\n return 0\n\n try:\n from ddtrace.vendor import psutil\n\n total_mem = psutil.swap_memory().total + psutil.virtual_memory().total\n except Exception:\n LOG.warning(\n \"Unable to get total memory available, using default value of %d KB\",\n default_heap_sample_size / 1024,\n exc_info=True,\n )\n return default_heap_sample_size\n\n # This is TRACEBACK_ARRAY_MAX_COUNT\n max_samples = 2 ** 16\n\n return max(math.ceil(total_mem / max_samples), default_heap_sample_size)\n\n\[email protected]\nclass MemoryCollector(collector.PeriodicCollector):\n \"\"\"Memory allocation collector.\"\"\"\n\n _DEFAULT_MAX_EVENTS = 16\n _DEFAULT_INTERVAL = 0.5\n\n # Arbitrary interval to empty the _memalloc event buffer\n _interval = attr.ib(default=_DEFAULT_INTERVAL, repr=False)\n\n # TODO make this dynamic based on the 1. interval and 2. the max number of events allowed in the Recorder\n _max_events = attr.ib(\n factory=attr_utils.from_env(\n \"_DD_PROFILING_MEMORY_EVENTS_BUFFER\",\n _DEFAULT_MAX_EVENTS,\n int,\n )\n )\n max_nframe = attr.ib(factory=attr_utils.from_env(\"DD_PROFILING_MAX_FRAMES\", 64, int))\n heap_sample_size = attr.ib(type=int, factory=_get_default_heap_sample_size)\n ignore_profiler = attr.ib(factory=attr_utils.from_env(\"DD_PROFILING_IGNORE_PROFILER\", False, formats.asbool))\n\n def _start_service(self):\n # type: (...) 
-> None\n \"\"\"Start collecting memory profiles.\"\"\"\n if _memalloc is None:\n raise collector.CollectorUnavailable\n\n _memalloc.start(self.max_nframe, self._max_events, self.heap_sample_size)\n\n super(MemoryCollector, self)._start_service()\n\n def _stop_service(self):\n # type: (...) -> None\n super(MemoryCollector, self)._stop_service()\n\n if _memalloc is not None:\n try:\n _memalloc.stop()\n except RuntimeError:\n pass\n\n def _get_thread_id_ignore_set(self):\n # type: () -> typing.Set[int]\n # This method is not perfect and prone to race condition in theory, but very little in practice.\n # Anyhow it's not a big deal \u2014 it's a best effort feature.\n return {\n thread.ident\n for thread in threading.enumerate()\n if getattr(thread, \"_ddtrace_profiling_ignore\", False) and thread.ident is not None\n }\n\n def snapshot(self):\n thread_id_ignore_set = self._get_thread_id_ignore_set()\n return (\n tuple(\n MemoryHeapSampleEvent(\n thread_id=thread_id,\n thread_name=_threading.get_thread_name(thread_id),\n thread_native_id=_threading.get_thread_native_id(thread_id),\n frames=stack,\n nframes=nframes,\n size=size,\n sample_size=self.heap_sample_size,\n )\n for (stack, nframes, thread_id), size in _memalloc.heap()\n if not self.ignore_profiler or thread_id not in thread_id_ignore_set\n ),\n )\n\n def collect(self):\n try:\n events, count, alloc_count = _memalloc.iter_events()\n except RuntimeError:\n # DEV: This can happen if either _memalloc has not been started or has been stopped.\n LOG.debug(\"Unable to collect memory events from process %d\", os.getpid(), exc_info=True)\n return tuple()\n\n capture_pct = 100 * count / alloc_count\n thread_id_ignore_set = self._get_thread_id_ignore_set()\n # TODO: The event timestamp is slightly off since it's going to be the time we copy the data from the\n # _memalloc buffer to our Recorder. This is fine for now, but we might want to store the nanoseconds\n # timestamp in C and then return it via iter_events.\n return (\n tuple(\n MemoryAllocSampleEvent(\n thread_id=thread_id,\n thread_name=_threading.get_thread_name(thread_id),\n thread_native_id=_threading.get_thread_native_id(thread_id),\n frames=stack,\n nframes=nframes,\n size=size,\n capture_pct=capture_pct,\n nevents=alloc_count,\n )\n for (stack, nframes, thread_id), size, domain in events\n if not self.ignore_profiler or thread_id not in thread_id_ignore_set\n ),\n )\n", "path": "ddtrace/profiling/collector/memalloc.py"}]}
| 2,313 | 222 |
gh_patches_debug_40814 | rasdani/github-patches | git_diff | twisted__twisted-11917 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Update towncrier
We use an old version of towncrier with a custom script to detect the release branch.
The latest towncrier should have support for checking a release branch.
We should update to the latest towncrier and remove our custom script.
Any good stuff used in the twisted/twisted custom towncrier script should be moved upstream.
--- END ISSUE ---
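
For context, a rough sketch of what the replacement could look like. It assumes the `towncrier check` subcommand and its `--compare-with` option, which recent towncrier releases provide; the script layout and function name are hypothetical and this is not the actual patch.

```
# Hypothetical drop-in for the custom newsfragment checker: delegate the
# "does this branch need a newsfragment?" decision to towncrier itself.
import subprocess
import sys


def check_newsfragments(checkout_path: str) -> int:
    result = subprocess.run(
        ["towncrier", "check", "--compare-with", "origin/trunk"],
        cwd=checkout_path,
    )
    return result.returncode


if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) > 1 else "."
    sys.exit(check_newsfragments(path))
```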
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/twisted/python/_release.py`
Content:
```
1 # -*- test-case-name: twisted.python.test.test_release -*-
2 # Copyright (c) Twisted Matrix Laboratories.
3 # See LICENSE for details.
4
5 """
6 Twisted's automated release system.
7
8 This module is only for use within Twisted's release system. If you are anyone
9 else, do not use it. The interface and behaviour will change without notice.
10
11 Only Linux is supported by this code. It should not be used by any tools
12 which must run on multiple platforms (eg the setup.py script).
13 """
14
15 import os
16 import sys
17 from subprocess import STDOUT, CalledProcessError, check_output
18 from typing import Dict
19
20 from zope.interface import Interface, implementer
21
22 from twisted.python.compat import execfile
23
24 # Types of newsfragments.
25 NEWSFRAGMENT_TYPES = ["doc", "bugfix", "misc", "feature", "removal"]
26
27
28 def runCommand(args, **kwargs):
29 """Execute a vector of arguments.
30
31 This is a wrapper around L{subprocess.check_output}, so it takes
32 the same arguments as L{subprocess.Popen} with one difference: all
33 arguments after the vector must be keyword arguments.
34
35 @param args: arguments passed to L{subprocess.check_output}
36 @param kwargs: keyword arguments passed to L{subprocess.check_output}
37 @return: command output
38 @rtype: L{bytes}
39 """
40 kwargs["stderr"] = STDOUT
41 return check_output(args, **kwargs)
42
43
44 class IVCSCommand(Interface):
45 """
46 An interface for VCS commands.
47 """
48
49 def ensureIsWorkingDirectory(path):
50 """
51 Ensure that C{path} is a working directory of this VCS.
52
53 @type path: L{twisted.python.filepath.FilePath}
54 @param path: The path to check.
55 """
56
57 def isStatusClean(path):
58 """
59 Return the Git status of the files in the specified path.
60
61 @type path: L{twisted.python.filepath.FilePath}
62 @param path: The path to get the status from (can be a directory or a
63 file.)
64 """
65
66 def remove(path):
67 """
68 Remove the specified path from a the VCS.
69
70 @type path: L{twisted.python.filepath.FilePath}
71 @param path: The path to remove from the repository.
72 """
73
74 def exportTo(fromDir, exportDir):
75 """
76 Export the content of the VCSrepository to the specified directory.
77
78 @type fromDir: L{twisted.python.filepath.FilePath}
79 @param fromDir: The path to the VCS repository to export.
80
81 @type exportDir: L{twisted.python.filepath.FilePath}
82 @param exportDir: The directory to export the content of the
83 repository to. This directory doesn't have to exist prior to
84 exporting the repository.
85 """
86
87
88 @implementer(IVCSCommand)
89 class GitCommand:
90 """
91 Subset of Git commands to release Twisted from a Git repository.
92 """
93
94 @staticmethod
95 def ensureIsWorkingDirectory(path):
96 """
97 Ensure that C{path} is a Git working directory.
98
99 @type path: L{twisted.python.filepath.FilePath}
100 @param path: The path to check.
101 """
102 try:
103 runCommand(["git", "rev-parse"], cwd=path.path)
104 except (CalledProcessError, OSError):
105 raise NotWorkingDirectory(
106 f"{path.path} does not appear to be a Git repository."
107 )
108
109 @staticmethod
110 def isStatusClean(path):
111 """
112 Return the Git status of the files in the specified path.
113
114 @type path: L{twisted.python.filepath.FilePath}
115 @param path: The path to get the status from (can be a directory or a
116 file.)
117 """
118 status = runCommand(["git", "-C", path.path, "status", "--short"]).strip()
119 return status == b""
120
121 @staticmethod
122 def remove(path):
123 """
124 Remove the specified path from a Git repository.
125
126 @type path: L{twisted.python.filepath.FilePath}
127 @param path: The path to remove from the repository.
128 """
129 runCommand(["git", "-C", path.dirname(), "rm", path.path])
130
131 @staticmethod
132 def exportTo(fromDir, exportDir):
133 """
134 Export the content of a Git repository to the specified directory.
135
136 @type fromDir: L{twisted.python.filepath.FilePath}
137 @param fromDir: The path to the Git repository to export.
138
139 @type exportDir: L{twisted.python.filepath.FilePath}
140 @param exportDir: The directory to export the content of the
141 repository to. This directory doesn't have to exist prior to
142 exporting the repository.
143 """
144 runCommand(
145 [
146 "git",
147 "-C",
148 fromDir.path,
149 "checkout-index",
150 "--all",
151 "--force",
152 # prefix has to end up with a "/" so that files get copied
153 # to a directory whose name is the prefix.
154 "--prefix",
155 exportDir.path + "/",
156 ]
157 )
158
159
160 def getRepositoryCommand(directory):
161 """
162 Detect the VCS used in the specified directory and return a L{GitCommand}
163 if the directory is a Git repository. If the directory is not git, it
164 raises a L{NotWorkingDirectory} exception.
165
166 @type directory: L{FilePath}
167 @param directory: The directory to detect the VCS used from.
168
169 @rtype: L{GitCommand}
170
171 @raise NotWorkingDirectory: if no supported VCS can be found from the
172 specified directory.
173 """
174 try:
175 GitCommand.ensureIsWorkingDirectory(directory)
176 return GitCommand
177 except (NotWorkingDirectory, OSError):
178 # It's not Git, but that's okay, eat the error
179 pass
180
181 raise NotWorkingDirectory(f"No supported VCS can be found in {directory.path}")
182
183
184 class Project:
185 """
186 A representation of a project that has a version.
187
188 @ivar directory: A L{twisted.python.filepath.FilePath} pointing to the base
189 directory of a Twisted-style Python package. The package should contain
190 a C{_version.py} file and a C{newsfragments} directory that contains a
191 C{README} file.
192 """
193
194 def __init__(self, directory):
195 self.directory = directory
196
197 def __repr__(self) -> str:
198 return f"{self.__class__.__name__}({self.directory!r})"
199
200 def getVersion(self):
201 """
202 @return: A L{incremental.Version} specifying the version number of the
203 project based on live python modules.
204 """
205 namespace: Dict[str, object] = {}
206 directory = self.directory
207 while not namespace:
208 if directory.path == "/":
209 raise Exception("Not inside a Twisted project.")
210 elif not directory.basename() == "twisted":
211 directory = directory.parent()
212 else:
213 execfile(directory.child("_version.py").path, namespace)
214 return namespace["__version__"]
215
216
217 def findTwistedProjects(baseDirectory):
218 """
219 Find all Twisted-style projects beneath a base directory.
220
221 @param baseDirectory: A L{twisted.python.filepath.FilePath} to look inside.
222 @return: A list of L{Project}.
223 """
224 projects = []
225 for filePath in baseDirectory.walk():
226 if filePath.basename() == "newsfragments":
227 projectDirectory = filePath.parent()
228 projects.append(Project(projectDirectory))
229 return projects
230
231
232 def replaceInFile(filename, oldToNew):
233 """
234 I replace the text `oldstr' with `newstr' in `filename' using science.
235 """
236 os.rename(filename, filename + ".bak")
237 with open(filename + ".bak") as f:
238 d = f.read()
239 for k, v in oldToNew.items():
240 d = d.replace(k, v)
241 with open(filename + ".new", "w") as f:
242 f.write(d)
243 os.rename(filename + ".new", filename)
244 os.unlink(filename + ".bak")
245
246
247 class NoDocumentsFound(Exception):
248 """
249 Raised when no input documents are found.
250 """
251
252
253 def filePathDelta(origin, destination):
254 """
255 Return a list of strings that represent C{destination} as a path relative
256 to C{origin}.
257
258 It is assumed that both paths represent directories, not files. That is to
259 say, the delta of L{twisted.python.filepath.FilePath} /foo/bar to
260 L{twisted.python.filepath.FilePath} /foo/baz will be C{../baz},
261 not C{baz}.
262
263 @type origin: L{twisted.python.filepath.FilePath}
264 @param origin: The origin of the relative path.
265
266 @type destination: L{twisted.python.filepath.FilePath}
267 @param destination: The destination of the relative path.
268 """
269 commonItems = 0
270 path1 = origin.path.split(os.sep)
271 path2 = destination.path.split(os.sep)
272 for elem1, elem2 in zip(path1, path2):
273 if elem1 == elem2:
274 commonItems += 1
275 else:
276 break
277 path = [".."] * (len(path1) - commonItems)
278 return path + path2[commonItems:]
279
280
281 class NotWorkingDirectory(Exception):
282 """
283 Raised when a directory does not appear to be a repository directory of a
284 supported VCS.
285 """
286
287
288 class CheckNewsfragmentScript:
289 """
290 A thing for checking whether a checkout has a newsfragment.
291 """
292
293 def __init__(self, _print):
294 self._print = _print
295
296 def main(self, args):
297 """
298 Run the script.
299
300 @type args: L{list} of L{str}
301 @param args: The command line arguments to process. This must contain
302 one string: the path to the root of the Twisted checkout.
303 """
304 if len(args) != 1:
305 sys.exit("Must specify one argument: the Twisted checkout")
306
307 encoding = sys.stdout.encoding or "ascii"
308 location = os.path.abspath(args[0])
309
310 branch = (
311 runCommand([b"git", b"rev-parse", b"--abbrev-ref", "HEAD"], cwd=location)
312 .decode(encoding)
313 .strip()
314 )
315
316 # diff-filter=d to exclude deleted newsfiles (which will happen on the
317 # release branch)
318 r = (
319 runCommand(
320 [
321 b"git",
322 b"diff",
323 b"--name-only",
324 b"origin/trunk...",
325 b"--diff-filter=d",
326 ],
327 cwd=location,
328 )
329 .decode(encoding)
330 .strip()
331 )
332
333 if not r:
334 self._print("On trunk or no diffs from trunk; no need to look at this.")
335 sys.exit(0)
336
337 files = r.strip().split(os.linesep)
338
339 self._print("Looking at these files:")
340 for change in files:
341 self._print(change)
342 self._print("----")
343
344 if len(files) == 1:
345 if files[0] == os.sep.join(["docs", "fun", "Twisted.Quotes"]):
346 self._print("Quotes change only; no newsfragment needed.")
347 sys.exit(0)
348
349 newsfragments = []
350
351 for change in files:
352 if os.sep + "newsfragments" + os.sep in change:
353 if "." in change and change.rsplit(".", 1)[1] in NEWSFRAGMENT_TYPES:
354 newsfragments.append(change)
355
356 if branch.startswith("release-"):
357 if newsfragments:
358 self._print("No newsfragments should be on the release branch.")
359 sys.exit(1)
360 else:
361 self._print("Release branch with no newsfragments, all good.")
362 sys.exit(0)
363
364 if os.environ.get("GITHUB_HEAD_REF", "") == "pre-commit-ci-update-config":
365 # The run was triggered by pre-commit.ci.
366 if newsfragments:
367 self._print(
368 "No newsfragments should be present on an autoupdated branch."
369 )
370 sys.exit(1)
371 else:
372 self._print("Autoupdated branch with no newsfragments, all good.")
373 sys.exit(0)
374
375 for change in newsfragments:
376 self._print("Found " + change)
377 sys.exit(0)
378
379 self._print("No newsfragment found. Have you committed it?")
380 sys.exit(1)
381
```
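
As a quick sanity check of the relative-path behaviour described in the `filePathDelta` docstring, here is an approximation on plain strings rather than FilePath objects; it mirrors the algorithm above and is only an illustration.

```
import os


def file_path_delta(origin: str, destination: str) -> list:
    # Same algorithm as filePathDelta, applied to plain path strings.
    path1 = origin.split(os.sep)
    path2 = destination.split(os.sep)
    common = 0
    for a, b in zip(path1, path2):
        if a != b:
            break
        common += 1
    return [".."] * (len(path1) - common) + path2[common:]


# On a POSIX system this reproduces the docstring example: /foo/bar -> /foo/baz
assert file_path_delta("/foo/bar", "/foo/baz") == ["..", "baz"]
```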
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/twisted/python/_release.py b/src/twisted/python/_release.py
--- a/src/twisted/python/_release.py
+++ b/src/twisted/python/_release.py
@@ -13,7 +13,6 @@
"""
import os
-import sys
from subprocess import STDOUT, CalledProcessError, check_output
from typing import Dict
@@ -21,9 +20,6 @@
from twisted.python.compat import execfile
-# Types of newsfragments.
-NEWSFRAGMENT_TYPES = ["doc", "bugfix", "misc", "feature", "removal"]
-
def runCommand(args, **kwargs):
"""Execute a vector of arguments.
@@ -283,98 +279,3 @@
Raised when a directory does not appear to be a repository directory of a
supported VCS.
"""
-
-
-class CheckNewsfragmentScript:
- """
- A thing for checking whether a checkout has a newsfragment.
- """
-
- def __init__(self, _print):
- self._print = _print
-
- def main(self, args):
- """
- Run the script.
-
- @type args: L{list} of L{str}
- @param args: The command line arguments to process. This must contain
- one string: the path to the root of the Twisted checkout.
- """
- if len(args) != 1:
- sys.exit("Must specify one argument: the Twisted checkout")
-
- encoding = sys.stdout.encoding or "ascii"
- location = os.path.abspath(args[0])
-
- branch = (
- runCommand([b"git", b"rev-parse", b"--abbrev-ref", "HEAD"], cwd=location)
- .decode(encoding)
- .strip()
- )
-
- # diff-filter=d to exclude deleted newsfiles (which will happen on the
- # release branch)
- r = (
- runCommand(
- [
- b"git",
- b"diff",
- b"--name-only",
- b"origin/trunk...",
- b"--diff-filter=d",
- ],
- cwd=location,
- )
- .decode(encoding)
- .strip()
- )
-
- if not r:
- self._print("On trunk or no diffs from trunk; no need to look at this.")
- sys.exit(0)
-
- files = r.strip().split(os.linesep)
-
- self._print("Looking at these files:")
- for change in files:
- self._print(change)
- self._print("----")
-
- if len(files) == 1:
- if files[0] == os.sep.join(["docs", "fun", "Twisted.Quotes"]):
- self._print("Quotes change only; no newsfragment needed.")
- sys.exit(0)
-
- newsfragments = []
-
- for change in files:
- if os.sep + "newsfragments" + os.sep in change:
- if "." in change and change.rsplit(".", 1)[1] in NEWSFRAGMENT_TYPES:
- newsfragments.append(change)
-
- if branch.startswith("release-"):
- if newsfragments:
- self._print("No newsfragments should be on the release branch.")
- sys.exit(1)
- else:
- self._print("Release branch with no newsfragments, all good.")
- sys.exit(0)
-
- if os.environ.get("GITHUB_HEAD_REF", "") == "pre-commit-ci-update-config":
- # The run was triggered by pre-commit.ci.
- if newsfragments:
- self._print(
- "No newsfragments should be present on an autoupdated branch."
- )
- sys.exit(1)
- else:
- self._print("Autoupdated branch with no newsfragments, all good.")
- sys.exit(0)
-
- for change in newsfragments:
- self._print("Found " + change)
- sys.exit(0)
-
- self._print("No newsfragment found. Have you committed it?")
- sys.exit(1)
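The deleted CheckNewsfragmentScript is meant to be superseded by towncrier's own newsfragment check (per the issue this patch addresses). A minimal sketch of invoking that check from Python is shown below; it assumes an installed towncrier release that ships the `check` subcommand with `--compare-with`, and the comparison branch name is an assumption carried over from the old script.
```python
# Sketch only: delegate the newsfragment check to towncrier itself.
# Assumes `towncrier check --compare-with <branch>` exists in the installed
# towncrier version; "origin/trunk" mirrors the old script's diff base.
from subprocess import run

def check_newsfragments(checkout_path):
    result = run(
        ["towncrier", "check", "--compare-with", "origin/trunk"],
        cwd=checkout_path,
    )
    return result.returncode == 0
```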
|
{"golden_diff": "diff --git a/src/twisted/python/_release.py b/src/twisted/python/_release.py\n--- a/src/twisted/python/_release.py\n+++ b/src/twisted/python/_release.py\n@@ -13,7 +13,6 @@\n \"\"\"\n \n import os\n-import sys\n from subprocess import STDOUT, CalledProcessError, check_output\n from typing import Dict\n \n@@ -21,9 +20,6 @@\n \n from twisted.python.compat import execfile\n \n-# Types of newsfragments.\n-NEWSFRAGMENT_TYPES = [\"doc\", \"bugfix\", \"misc\", \"feature\", \"removal\"]\n-\n \n def runCommand(args, **kwargs):\n \"\"\"Execute a vector of arguments.\n@@ -283,98 +279,3 @@\n Raised when a directory does not appear to be a repository directory of a\n supported VCS.\n \"\"\"\n-\n-\n-class CheckNewsfragmentScript:\n- \"\"\"\n- A thing for checking whether a checkout has a newsfragment.\n- \"\"\"\n-\n- def __init__(self, _print):\n- self._print = _print\n-\n- def main(self, args):\n- \"\"\"\n- Run the script.\n-\n- @type args: L{list} of L{str}\n- @param args: The command line arguments to process. This must contain\n- one string: the path to the root of the Twisted checkout.\n- \"\"\"\n- if len(args) != 1:\n- sys.exit(\"Must specify one argument: the Twisted checkout\")\n-\n- encoding = sys.stdout.encoding or \"ascii\"\n- location = os.path.abspath(args[0])\n-\n- branch = (\n- runCommand([b\"git\", b\"rev-parse\", b\"--abbrev-ref\", \"HEAD\"], cwd=location)\n- .decode(encoding)\n- .strip()\n- )\n-\n- # diff-filter=d to exclude deleted newsfiles (which will happen on the\n- # release branch)\n- r = (\n- runCommand(\n- [\n- b\"git\",\n- b\"diff\",\n- b\"--name-only\",\n- b\"origin/trunk...\",\n- b\"--diff-filter=d\",\n- ],\n- cwd=location,\n- )\n- .decode(encoding)\n- .strip()\n- )\n-\n- if not r:\n- self._print(\"On trunk or no diffs from trunk; no need to look at this.\")\n- sys.exit(0)\n-\n- files = r.strip().split(os.linesep)\n-\n- self._print(\"Looking at these files:\")\n- for change in files:\n- self._print(change)\n- self._print(\"----\")\n-\n- if len(files) == 1:\n- if files[0] == os.sep.join([\"docs\", \"fun\", \"Twisted.Quotes\"]):\n- self._print(\"Quotes change only; no newsfragment needed.\")\n- sys.exit(0)\n-\n- newsfragments = []\n-\n- for change in files:\n- if os.sep + \"newsfragments\" + os.sep in change:\n- if \".\" in change and change.rsplit(\".\", 1)[1] in NEWSFRAGMENT_TYPES:\n- newsfragments.append(change)\n-\n- if branch.startswith(\"release-\"):\n- if newsfragments:\n- self._print(\"No newsfragments should be on the release branch.\")\n- sys.exit(1)\n- else:\n- self._print(\"Release branch with no newsfragments, all good.\")\n- sys.exit(0)\n-\n- if os.environ.get(\"GITHUB_HEAD_REF\", \"\") == \"pre-commit-ci-update-config\":\n- # The run was triggered by pre-commit.ci.\n- if newsfragments:\n- self._print(\n- \"No newsfragments should be present on an autoupdated branch.\"\n- )\n- sys.exit(1)\n- else:\n- self._print(\"Autoupdated branch with no newsfragments, all good.\")\n- sys.exit(0)\n-\n- for change in newsfragments:\n- self._print(\"Found \" + change)\n- sys.exit(0)\n-\n- self._print(\"No newsfragment found. 
Have you committed it?\")\n- sys.exit(1)\n", "issue": "Update towncrier\nWe use on old version of towncrier with a custom script to detect release branch.\r\n\r\nThe latest towncrier should have support to checking a release branch.\r\n\r\nWe should update to towncrier and remove our custom script.\r\n\r\nAny good stuff used in twisted/twisted custom towncrier script should be moved upstream.\n", "before_files": [{"content": "# -*- test-case-name: twisted.python.test.test_release -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nTwisted's automated release system.\n\nThis module is only for use within Twisted's release system. If you are anyone\nelse, do not use it. The interface and behaviour will change without notice.\n\nOnly Linux is supported by this code. It should not be used by any tools\nwhich must run on multiple platforms (eg the setup.py script).\n\"\"\"\n\nimport os\nimport sys\nfrom subprocess import STDOUT, CalledProcessError, check_output\nfrom typing import Dict\n\nfrom zope.interface import Interface, implementer\n\nfrom twisted.python.compat import execfile\n\n# Types of newsfragments.\nNEWSFRAGMENT_TYPES = [\"doc\", \"bugfix\", \"misc\", \"feature\", \"removal\"]\n\n\ndef runCommand(args, **kwargs):\n \"\"\"Execute a vector of arguments.\n\n This is a wrapper around L{subprocess.check_output}, so it takes\n the same arguments as L{subprocess.Popen} with one difference: all\n arguments after the vector must be keyword arguments.\n\n @param args: arguments passed to L{subprocess.check_output}\n @param kwargs: keyword arguments passed to L{subprocess.check_output}\n @return: command output\n @rtype: L{bytes}\n \"\"\"\n kwargs[\"stderr\"] = STDOUT\n return check_output(args, **kwargs)\n\n\nclass IVCSCommand(Interface):\n \"\"\"\n An interface for VCS commands.\n \"\"\"\n\n def ensureIsWorkingDirectory(path):\n \"\"\"\n Ensure that C{path} is a working directory of this VCS.\n\n @type path: L{twisted.python.filepath.FilePath}\n @param path: The path to check.\n \"\"\"\n\n def isStatusClean(path):\n \"\"\"\n Return the Git status of the files in the specified path.\n\n @type path: L{twisted.python.filepath.FilePath}\n @param path: The path to get the status from (can be a directory or a\n file.)\n \"\"\"\n\n def remove(path):\n \"\"\"\n Remove the specified path from a the VCS.\n\n @type path: L{twisted.python.filepath.FilePath}\n @param path: The path to remove from the repository.\n \"\"\"\n\n def exportTo(fromDir, exportDir):\n \"\"\"\n Export the content of the VCSrepository to the specified directory.\n\n @type fromDir: L{twisted.python.filepath.FilePath}\n @param fromDir: The path to the VCS repository to export.\n\n @type exportDir: L{twisted.python.filepath.FilePath}\n @param exportDir: The directory to export the content of the\n repository to. 
This directory doesn't have to exist prior to\n exporting the repository.\n \"\"\"\n\n\n@implementer(IVCSCommand)\nclass GitCommand:\n \"\"\"\n Subset of Git commands to release Twisted from a Git repository.\n \"\"\"\n\n @staticmethod\n def ensureIsWorkingDirectory(path):\n \"\"\"\n Ensure that C{path} is a Git working directory.\n\n @type path: L{twisted.python.filepath.FilePath}\n @param path: The path to check.\n \"\"\"\n try:\n runCommand([\"git\", \"rev-parse\"], cwd=path.path)\n except (CalledProcessError, OSError):\n raise NotWorkingDirectory(\n f\"{path.path} does not appear to be a Git repository.\"\n )\n\n @staticmethod\n def isStatusClean(path):\n \"\"\"\n Return the Git status of the files in the specified path.\n\n @type path: L{twisted.python.filepath.FilePath}\n @param path: The path to get the status from (can be a directory or a\n file.)\n \"\"\"\n status = runCommand([\"git\", \"-C\", path.path, \"status\", \"--short\"]).strip()\n return status == b\"\"\n\n @staticmethod\n def remove(path):\n \"\"\"\n Remove the specified path from a Git repository.\n\n @type path: L{twisted.python.filepath.FilePath}\n @param path: The path to remove from the repository.\n \"\"\"\n runCommand([\"git\", \"-C\", path.dirname(), \"rm\", path.path])\n\n @staticmethod\n def exportTo(fromDir, exportDir):\n \"\"\"\n Export the content of a Git repository to the specified directory.\n\n @type fromDir: L{twisted.python.filepath.FilePath}\n @param fromDir: The path to the Git repository to export.\n\n @type exportDir: L{twisted.python.filepath.FilePath}\n @param exportDir: The directory to export the content of the\n repository to. This directory doesn't have to exist prior to\n exporting the repository.\n \"\"\"\n runCommand(\n [\n \"git\",\n \"-C\",\n fromDir.path,\n \"checkout-index\",\n \"--all\",\n \"--force\",\n # prefix has to end up with a \"/\" so that files get copied\n # to a directory whose name is the prefix.\n \"--prefix\",\n exportDir.path + \"/\",\n ]\n )\n\n\ndef getRepositoryCommand(directory):\n \"\"\"\n Detect the VCS used in the specified directory and return a L{GitCommand}\n if the directory is a Git repository. If the directory is not git, it\n raises a L{NotWorkingDirectory} exception.\n\n @type directory: L{FilePath}\n @param directory: The directory to detect the VCS used from.\n\n @rtype: L{GitCommand}\n\n @raise NotWorkingDirectory: if no supported VCS can be found from the\n specified directory.\n \"\"\"\n try:\n GitCommand.ensureIsWorkingDirectory(directory)\n return GitCommand\n except (NotWorkingDirectory, OSError):\n # It's not Git, but that's okay, eat the error\n pass\n\n raise NotWorkingDirectory(f\"No supported VCS can be found in {directory.path}\")\n\n\nclass Project:\n \"\"\"\n A representation of a project that has a version.\n\n @ivar directory: A L{twisted.python.filepath.FilePath} pointing to the base\n directory of a Twisted-style Python package. 
The package should contain\n a C{_version.py} file and a C{newsfragments} directory that contains a\n C{README} file.\n \"\"\"\n\n def __init__(self, directory):\n self.directory = directory\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self.directory!r})\"\n\n def getVersion(self):\n \"\"\"\n @return: A L{incremental.Version} specifying the version number of the\n project based on live python modules.\n \"\"\"\n namespace: Dict[str, object] = {}\n directory = self.directory\n while not namespace:\n if directory.path == \"/\":\n raise Exception(\"Not inside a Twisted project.\")\n elif not directory.basename() == \"twisted\":\n directory = directory.parent()\n else:\n execfile(directory.child(\"_version.py\").path, namespace)\n return namespace[\"__version__\"]\n\n\ndef findTwistedProjects(baseDirectory):\n \"\"\"\n Find all Twisted-style projects beneath a base directory.\n\n @param baseDirectory: A L{twisted.python.filepath.FilePath} to look inside.\n @return: A list of L{Project}.\n \"\"\"\n projects = []\n for filePath in baseDirectory.walk():\n if filePath.basename() == \"newsfragments\":\n projectDirectory = filePath.parent()\n projects.append(Project(projectDirectory))\n return projects\n\n\ndef replaceInFile(filename, oldToNew):\n \"\"\"\n I replace the text `oldstr' with `newstr' in `filename' using science.\n \"\"\"\n os.rename(filename, filename + \".bak\")\n with open(filename + \".bak\") as f:\n d = f.read()\n for k, v in oldToNew.items():\n d = d.replace(k, v)\n with open(filename + \".new\", \"w\") as f:\n f.write(d)\n os.rename(filename + \".new\", filename)\n os.unlink(filename + \".bak\")\n\n\nclass NoDocumentsFound(Exception):\n \"\"\"\n Raised when no input documents are found.\n \"\"\"\n\n\ndef filePathDelta(origin, destination):\n \"\"\"\n Return a list of strings that represent C{destination} as a path relative\n to C{origin}.\n\n It is assumed that both paths represent directories, not files. That is to\n say, the delta of L{twisted.python.filepath.FilePath} /foo/bar to\n L{twisted.python.filepath.FilePath} /foo/baz will be C{../baz},\n not C{baz}.\n\n @type origin: L{twisted.python.filepath.FilePath}\n @param origin: The origin of the relative path.\n\n @type destination: L{twisted.python.filepath.FilePath}\n @param destination: The destination of the relative path.\n \"\"\"\n commonItems = 0\n path1 = origin.path.split(os.sep)\n path2 = destination.path.split(os.sep)\n for elem1, elem2 in zip(path1, path2):\n if elem1 == elem2:\n commonItems += 1\n else:\n break\n path = [\"..\"] * (len(path1) - commonItems)\n return path + path2[commonItems:]\n\n\nclass NotWorkingDirectory(Exception):\n \"\"\"\n Raised when a directory does not appear to be a repository directory of a\n supported VCS.\n \"\"\"\n\n\nclass CheckNewsfragmentScript:\n \"\"\"\n A thing for checking whether a checkout has a newsfragment.\n \"\"\"\n\n def __init__(self, _print):\n self._print = _print\n\n def main(self, args):\n \"\"\"\n Run the script.\n\n @type args: L{list} of L{str}\n @param args: The command line arguments to process. 
This must contain\n one string: the path to the root of the Twisted checkout.\n \"\"\"\n if len(args) != 1:\n sys.exit(\"Must specify one argument: the Twisted checkout\")\n\n encoding = sys.stdout.encoding or \"ascii\"\n location = os.path.abspath(args[0])\n\n branch = (\n runCommand([b\"git\", b\"rev-parse\", b\"--abbrev-ref\", \"HEAD\"], cwd=location)\n .decode(encoding)\n .strip()\n )\n\n # diff-filter=d to exclude deleted newsfiles (which will happen on the\n # release branch)\n r = (\n runCommand(\n [\n b\"git\",\n b\"diff\",\n b\"--name-only\",\n b\"origin/trunk...\",\n b\"--diff-filter=d\",\n ],\n cwd=location,\n )\n .decode(encoding)\n .strip()\n )\n\n if not r:\n self._print(\"On trunk or no diffs from trunk; no need to look at this.\")\n sys.exit(0)\n\n files = r.strip().split(os.linesep)\n\n self._print(\"Looking at these files:\")\n for change in files:\n self._print(change)\n self._print(\"----\")\n\n if len(files) == 1:\n if files[0] == os.sep.join([\"docs\", \"fun\", \"Twisted.Quotes\"]):\n self._print(\"Quotes change only; no newsfragment needed.\")\n sys.exit(0)\n\n newsfragments = []\n\n for change in files:\n if os.sep + \"newsfragments\" + os.sep in change:\n if \".\" in change and change.rsplit(\".\", 1)[1] in NEWSFRAGMENT_TYPES:\n newsfragments.append(change)\n\n if branch.startswith(\"release-\"):\n if newsfragments:\n self._print(\"No newsfragments should be on the release branch.\")\n sys.exit(1)\n else:\n self._print(\"Release branch with no newsfragments, all good.\")\n sys.exit(0)\n\n if os.environ.get(\"GITHUB_HEAD_REF\", \"\") == \"pre-commit-ci-update-config\":\n # The run was triggered by pre-commit.ci.\n if newsfragments:\n self._print(\n \"No newsfragments should be present on an autoupdated branch.\"\n )\n sys.exit(1)\n else:\n self._print(\"Autoupdated branch with no newsfragments, all good.\")\n sys.exit(0)\n\n for change in newsfragments:\n self._print(\"Found \" + change)\n sys.exit(0)\n\n self._print(\"No newsfragment found. Have you committed it?\")\n sys.exit(1)\n", "path": "src/twisted/python/_release.py"}], "after_files": [{"content": "# -*- test-case-name: twisted.python.test.test_release -*-\n# Copyright (c) Twisted Matrix Laboratories.\n# See LICENSE for details.\n\n\"\"\"\nTwisted's automated release system.\n\nThis module is only for use within Twisted's release system. If you are anyone\nelse, do not use it. The interface and behaviour will change without notice.\n\nOnly Linux is supported by this code. 
It should not be used by any tools\nwhich must run on multiple platforms (eg the setup.py script).\n\"\"\"\n\nimport os\nfrom subprocess import STDOUT, CalledProcessError, check_output\nfrom typing import Dict\n\nfrom zope.interface import Interface, implementer\n\nfrom twisted.python.compat import execfile\n\n\ndef runCommand(args, **kwargs):\n \"\"\"Execute a vector of arguments.\n\n This is a wrapper around L{subprocess.check_output}, so it takes\n the same arguments as L{subprocess.Popen} with one difference: all\n arguments after the vector must be keyword arguments.\n\n @param args: arguments passed to L{subprocess.check_output}\n @param kwargs: keyword arguments passed to L{subprocess.check_output}\n @return: command output\n @rtype: L{bytes}\n \"\"\"\n kwargs[\"stderr\"] = STDOUT\n return check_output(args, **kwargs)\n\n\nclass IVCSCommand(Interface):\n \"\"\"\n An interface for VCS commands.\n \"\"\"\n\n def ensureIsWorkingDirectory(path):\n \"\"\"\n Ensure that C{path} is a working directory of this VCS.\n\n @type path: L{twisted.python.filepath.FilePath}\n @param path: The path to check.\n \"\"\"\n\n def isStatusClean(path):\n \"\"\"\n Return the Git status of the files in the specified path.\n\n @type path: L{twisted.python.filepath.FilePath}\n @param path: The path to get the status from (can be a directory or a\n file.)\n \"\"\"\n\n def remove(path):\n \"\"\"\n Remove the specified path from a the VCS.\n\n @type path: L{twisted.python.filepath.FilePath}\n @param path: The path to remove from the repository.\n \"\"\"\n\n def exportTo(fromDir, exportDir):\n \"\"\"\n Export the content of the VCSrepository to the specified directory.\n\n @type fromDir: L{twisted.python.filepath.FilePath}\n @param fromDir: The path to the VCS repository to export.\n\n @type exportDir: L{twisted.python.filepath.FilePath}\n @param exportDir: The directory to export the content of the\n repository to. 
This directory doesn't have to exist prior to\n exporting the repository.\n \"\"\"\n\n\n@implementer(IVCSCommand)\nclass GitCommand:\n \"\"\"\n Subset of Git commands to release Twisted from a Git repository.\n \"\"\"\n\n @staticmethod\n def ensureIsWorkingDirectory(path):\n \"\"\"\n Ensure that C{path} is a Git working directory.\n\n @type path: L{twisted.python.filepath.FilePath}\n @param path: The path to check.\n \"\"\"\n try:\n runCommand([\"git\", \"rev-parse\"], cwd=path.path)\n except (CalledProcessError, OSError):\n raise NotWorkingDirectory(\n f\"{path.path} does not appear to be a Git repository.\"\n )\n\n @staticmethod\n def isStatusClean(path):\n \"\"\"\n Return the Git status of the files in the specified path.\n\n @type path: L{twisted.python.filepath.FilePath}\n @param path: The path to get the status from (can be a directory or a\n file.)\n \"\"\"\n status = runCommand([\"git\", \"-C\", path.path, \"status\", \"--short\"]).strip()\n return status == b\"\"\n\n @staticmethod\n def remove(path):\n \"\"\"\n Remove the specified path from a Git repository.\n\n @type path: L{twisted.python.filepath.FilePath}\n @param path: The path to remove from the repository.\n \"\"\"\n runCommand([\"git\", \"-C\", path.dirname(), \"rm\", path.path])\n\n @staticmethod\n def exportTo(fromDir, exportDir):\n \"\"\"\n Export the content of a Git repository to the specified directory.\n\n @type fromDir: L{twisted.python.filepath.FilePath}\n @param fromDir: The path to the Git repository to export.\n\n @type exportDir: L{twisted.python.filepath.FilePath}\n @param exportDir: The directory to export the content of the\n repository to. This directory doesn't have to exist prior to\n exporting the repository.\n \"\"\"\n runCommand(\n [\n \"git\",\n \"-C\",\n fromDir.path,\n \"checkout-index\",\n \"--all\",\n \"--force\",\n # prefix has to end up with a \"/\" so that files get copied\n # to a directory whose name is the prefix.\n \"--prefix\",\n exportDir.path + \"/\",\n ]\n )\n\n\ndef getRepositoryCommand(directory):\n \"\"\"\n Detect the VCS used in the specified directory and return a L{GitCommand}\n if the directory is a Git repository. If the directory is not git, it\n raises a L{NotWorkingDirectory} exception.\n\n @type directory: L{FilePath}\n @param directory: The directory to detect the VCS used from.\n\n @rtype: L{GitCommand}\n\n @raise NotWorkingDirectory: if no supported VCS can be found from the\n specified directory.\n \"\"\"\n try:\n GitCommand.ensureIsWorkingDirectory(directory)\n return GitCommand\n except (NotWorkingDirectory, OSError):\n # It's not Git, but that's okay, eat the error\n pass\n\n raise NotWorkingDirectory(f\"No supported VCS can be found in {directory.path}\")\n\n\nclass Project:\n \"\"\"\n A representation of a project that has a version.\n\n @ivar directory: A L{twisted.python.filepath.FilePath} pointing to the base\n directory of a Twisted-style Python package. 
The package should contain\n a C{_version.py} file and a C{newsfragments} directory that contains a\n C{README} file.\n \"\"\"\n\n def __init__(self, directory):\n self.directory = directory\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self.directory!r})\"\n\n def getVersion(self):\n \"\"\"\n @return: A L{incremental.Version} specifying the version number of the\n project based on live python modules.\n \"\"\"\n namespace: Dict[str, object] = {}\n directory = self.directory\n while not namespace:\n if directory.path == \"/\":\n raise Exception(\"Not inside a Twisted project.\")\n elif not directory.basename() == \"twisted\":\n directory = directory.parent()\n else:\n execfile(directory.child(\"_version.py\").path, namespace)\n return namespace[\"__version__\"]\n\n\ndef findTwistedProjects(baseDirectory):\n \"\"\"\n Find all Twisted-style projects beneath a base directory.\n\n @param baseDirectory: A L{twisted.python.filepath.FilePath} to look inside.\n @return: A list of L{Project}.\n \"\"\"\n projects = []\n for filePath in baseDirectory.walk():\n if filePath.basename() == \"newsfragments\":\n projectDirectory = filePath.parent()\n projects.append(Project(projectDirectory))\n return projects\n\n\ndef replaceInFile(filename, oldToNew):\n \"\"\"\n I replace the text `oldstr' with `newstr' in `filename' using science.\n \"\"\"\n os.rename(filename, filename + \".bak\")\n with open(filename + \".bak\") as f:\n d = f.read()\n for k, v in oldToNew.items():\n d = d.replace(k, v)\n with open(filename + \".new\", \"w\") as f:\n f.write(d)\n os.rename(filename + \".new\", filename)\n os.unlink(filename + \".bak\")\n\n\nclass NoDocumentsFound(Exception):\n \"\"\"\n Raised when no input documents are found.\n \"\"\"\n\n\ndef filePathDelta(origin, destination):\n \"\"\"\n Return a list of strings that represent C{destination} as a path relative\n to C{origin}.\n\n It is assumed that both paths represent directories, not files. That is to\n say, the delta of L{twisted.python.filepath.FilePath} /foo/bar to\n L{twisted.python.filepath.FilePath} /foo/baz will be C{../baz},\n not C{baz}.\n\n @type origin: L{twisted.python.filepath.FilePath}\n @param origin: The origin of the relative path.\n\n @type destination: L{twisted.python.filepath.FilePath}\n @param destination: The destination of the relative path.\n \"\"\"\n commonItems = 0\n path1 = origin.path.split(os.sep)\n path2 = destination.path.split(os.sep)\n for elem1, elem2 in zip(path1, path2):\n if elem1 == elem2:\n commonItems += 1\n else:\n break\n path = [\"..\"] * (len(path1) - commonItems)\n return path + path2[commonItems:]\n\n\nclass NotWorkingDirectory(Exception):\n \"\"\"\n Raised when a directory does not appear to be a repository directory of a\n supported VCS.\n \"\"\"\n", "path": "src/twisted/python/_release.py"}]}
| 4,081 | 927 |
gh_patches_debug_42167
|
rasdani/github-patches
|
git_diff
|
sunpy__sunpy-3970
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
sunpy.data.manager does not allow for local path object in manager.override_file()
<!-- These comments are hidden when you submit the issue so you do not need to remove them!
Please be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst
Please be sure to check out our code of conduct:
https://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst -->
<!-- Please have a search on our GitHub repository to see if a similar issue has already been posted.
If a similar issue is closed, have a quick look to see if you are satisfied by the resolution.
If not please go ahead and open an issue! -->
### Description
It would be great if `sunpy.data.manager` could take a local file path
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sunpy/data/data_manager/manager.py`
Content:
```
1 from typing import Dict
2 import pathlib
3 import functools
4 from contextlib import contextmanager
5 import warnings
6
7 from sunpy.util.util import hash_file
8 from sunpy.util.exceptions import SunpyUserWarning
9
10 __all__ = ['DataManager']
11
12
13 class DataManager:
14 """
15 This class provides a remote data manager for managing remote files.
16
17 Parameters
18 ----------
19 cache: `sunpy.data.data_manager.cache.Cache`
20 Cache object to be used by `~sunpy.data.data_manager.manager.DataManager`.
21 """
22
23 def __init__(self, cache):
24 self._cache = cache
25
26 self._file_cache = {}
27
28 self._skip_hash_check = False
29 self._skip_file: Dict[str, str] = {}
30
31 def require(self, name, urls, sha_hash):
32 """
33 Decorator for informing the data manager about the requirement of
34 a file by a function.
35
36 Parameters
37 ----------
38 name: `str`
39 The name to reference the file with.
40 urls: `list` or `str`
41 A list of urls to download the file from.
42 sha_hash: `str`
43 SHA-1 hash of file.
44 """
45 if isinstance(urls, str):
46 urls = [urls]
47
48 def decorator(func):
49 @functools.wraps(func)
50 def wrapper(*args, **kwargs):
51 replace = self._skip_file.get(name, None)
52 if replace:
53 if replace['uri'].startswith('file://'):
54 file_path = replace['uri'][len('file://'):]
55 file_hash = hash_file(file_path)
56 else:
57 file_path, file_hash, _ = self._cache._download_and_hash([replace['uri']])
58 if replace['hash'] and file_hash != replace['hash']:
59 # if hash provided to replace function doesn't match the hash of the file
60 # raise error
61 raise ValueError(
62 "Hash provided to override_file does not match hash of the file.")
63 elif self._skip_hash_check:
64 file_path = self._cache.download(urls, redownload=True)
65 else:
66 details = self._cache.get_by_hash(sha_hash)
67 if not details:
68 # In case we are matching by hash and file does not exist
69 # That might mean the wrong hash is supplied to decorator
70 # We match by urls to make sure that is not the case
71 if self._cache_has_file(urls):
72 raise ValueError(" Hash provided does not match the hash in database.")
73 file_path = self._cache.download(urls)
74 if hash_file(file_path) != sha_hash:
75 # the hash of the file downloaded does not match provided hash
76 # this means the file has changed on the server.
77 # the function should be updated to use the new hash. Raise an error to notify.
78 raise RuntimeError(
79 "Remote file on the server has changed. Update hash of the function.")
80 else:
81 # This is to handle the case when the local file appears to be tampered/corrupted
82 if hash_file(details['file_path']) != details['file_hash']:
83 warnings.warn("Hashes do not match, the file will be redownloaded (could be be tampered/corrupted)",
84 SunpyUserWarning)
85 file_path = self._cache.download(urls, redownload=True)
86 # Recheck the hash again, if this fails, we will exit.
87 if hash_file(file_path) != details['file_hash']:
88 raise RuntimeError("Redownloaded file also has the incorrect hash."
89 "The remote file on the server might have changed.")
90 else:
91 file_path = details['file_path']
92
93 self._file_cache[name] = file_path
94 return func(*args, **kwargs)
95 return wrapper
96
97 return decorator
98
99 @contextmanager
100 def override_file(self, name, uri, sha_hash=None):
101 """
102 Replaces the file by the name with the file provided by the url/path.
103
104 Parameters
105 ----------
106 name: `str`
107 Name of the file provided in the `require` decorator.
108 uri: `str`
109 URI of the file which replaces original file. Scheme should be
110 one of ``http``, ``https``, ``ftp`` or ``file``.
111 sha_hash: `str`, optional
112 SHA256 hash of the file to compared to after downloading.
113 """
114 try:
115 self._skip_file[name] = {
116 'uri': uri,
117 'hash': sha_hash,
118 }
119 yield
120 finally:
121 _ = self._skip_file.pop(name, None)
122
123 @contextmanager
124 def skip_hash_check(self):
125 """
126 Disables hash checking temporarily
127
128 Examples
129 --------
130 >>> with remote_data_manager.skip_hash_check(): # doctest: +SKIP
131 ... myfunction() # doctest: +SKIP
132 """
133 try:
134 self._skip_hash_check = True
135 yield
136 finally:
137 self._skip_hash_check = False
138
139 def get(self, name):
140 """
141 Get the file by name.
142
143 Parameters
144 ----------
145 name: `str`
146 Name of the file given to the data manager, same as the one provided
147 in `~sunpy.data.data_manager.manager.DataManager.require`.
148
149 Returns
150 -------
151 `pathlib.Path`
152 Path of the file.
153
154 Raises
155 ------
156 `KeyError`
157 If ``name`` is not in the cache.
158 """
159 return pathlib.Path(self._file_cache[name])
160
161 def _cache_has_file(self, urls):
162 for url in urls:
163 if self._cache._get_by_url(url):
164 return True
165 return False
166
```
--- END FILES ---
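For orientation, the decorator and context manager documented in the file above are used roughly as follows. This is a sketch only: the URL and hash strings are placeholders rather than real sample data, and `sunpy.data.manager` is assumed to be the module-level DataManager instance.
```python
from sunpy.data import manager

# Placeholder URL and hash, purely for illustration.
@manager.require('test_file',
                 ['https://example.com/test_file.txt'],
                 '<expected-sha-hash-of-test_file>')
def read_test_file():
    # Inside the decorated function the cached path is retrieved via get().
    return manager.get('test_file').read_text()

# Temporarily substitute a different file; the issue above asks for plain
# local paths to be accepted here in addition to file:// URIs.
with manager.override_file('test_file', uri='file:///tmp/alternative.txt'):
    contents = read_test_file()
```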
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sunpy/data/data_manager/manager.py b/sunpy/data/data_manager/manager.py
--- a/sunpy/data/data_manager/manager.py
+++ b/sunpy/data/data_manager/manager.py
@@ -1,11 +1,12 @@
-from typing import Dict
import pathlib
+import warnings
import functools
+from typing import Dict
from contextlib import contextmanager
-import warnings
+from urllib.parse import urlparse
-from sunpy.util.util import hash_file
from sunpy.util.exceptions import SunpyUserWarning
+from sunpy.util.util import hash_file
__all__ = ['DataManager']
@@ -50,8 +51,14 @@
def wrapper(*args, **kwargs):
replace = self._skip_file.get(name, None)
if replace:
- if replace['uri'].startswith('file://'):
- file_path = replace['uri'][len('file://'):]
+ uri_parse = urlparse(replace['uri'])
+ if uri_parse.scheme in ("", "file"):
+ # If a relative file uri is specified (i.e.
+ # `file://sunpy/test`) this maintains compatibility
+ # with the original behaviour where this would be
+ # interpreted as `./sunpy/test` if no scheme is
+ # specified netloc will be '' by default.
+ file_path = uri_parse.netloc + uri_parse.path
file_hash = hash_file(file_path)
else:
file_path, file_hash, _ = self._cache._download_and_hash([replace['uri']])
@@ -74,11 +81,13 @@
if hash_file(file_path) != sha_hash:
# the hash of the file downloaded does not match provided hash
# this means the file has changed on the server.
- # the function should be updated to use the new hash. Raise an error to notify.
+ # the function should be updated to use the new
+ # hash. Raise an error to notify.
raise RuntimeError(
"Remote file on the server has changed. Update hash of the function.")
else:
- # This is to handle the case when the local file appears to be tampered/corrupted
+ # This is to handle the case when the local file
+ # appears to be tampered/corrupted
if hash_file(details['file_path']) != details['file_hash']:
warnings.warn("Hashes do not match, the file will be redownloaded (could be be tampered/corrupted)",
SunpyUserWarning)
@@ -106,8 +115,10 @@
name: `str`
Name of the file provided in the `require` decorator.
uri: `str`
- URI of the file which replaces original file. Scheme should be
- one of ``http``, ``https``, ``ftp`` or ``file``.
+ URI of the file which replaces original file. Scheme should be one
+ of ``http``, ``https``, ``ftp`` or ``file``. If no scheme is given
+ the uri will be interpreted as a local path. i.e.
+ ``file:///tmp/test`` and ``/tmp/test`` are the same.
sha_hash: `str`, optional
SHA256 hash of the file to compared to after downloading.
"""
|
{"golden_diff": "diff --git a/sunpy/data/data_manager/manager.py b/sunpy/data/data_manager/manager.py\n--- a/sunpy/data/data_manager/manager.py\n+++ b/sunpy/data/data_manager/manager.py\n@@ -1,11 +1,12 @@\n-from typing import Dict\n import pathlib\n+import warnings\n import functools\n+from typing import Dict\n from contextlib import contextmanager\n-import warnings\n+from urllib.parse import urlparse\n \n-from sunpy.util.util import hash_file\n from sunpy.util.exceptions import SunpyUserWarning\n+from sunpy.util.util import hash_file\n \n __all__ = ['DataManager']\n \n@@ -50,8 +51,14 @@\n def wrapper(*args, **kwargs):\n replace = self._skip_file.get(name, None)\n if replace:\n- if replace['uri'].startswith('file://'):\n- file_path = replace['uri'][len('file://'):]\n+ uri_parse = urlparse(replace['uri'])\n+ if uri_parse.scheme in (\"\", \"file\"):\n+ # If a relative file uri is specified (i.e.\n+ # `file://sunpy/test`) this maintains compatibility\n+ # with the original behaviour where this would be\n+ # interpreted as `./sunpy/test` if no scheme is\n+ # specified netloc will be '' by default.\n+ file_path = uri_parse.netloc + uri_parse.path\n file_hash = hash_file(file_path)\n else:\n file_path, file_hash, _ = self._cache._download_and_hash([replace['uri']])\n@@ -74,11 +81,13 @@\n if hash_file(file_path) != sha_hash:\n # the hash of the file downloaded does not match provided hash\n # this means the file has changed on the server.\n- # the function should be updated to use the new hash. Raise an error to notify.\n+ # the function should be updated to use the new\n+ # hash. Raise an error to notify.\n raise RuntimeError(\n \"Remote file on the server has changed. Update hash of the function.\")\n else:\n- # This is to handle the case when the local file appears to be tampered/corrupted\n+ # This is to handle the case when the local file\n+ # appears to be tampered/corrupted\n if hash_file(details['file_path']) != details['file_hash']:\n warnings.warn(\"Hashes do not match, the file will be redownloaded (could be be tampered/corrupted)\",\n SunpyUserWarning)\n@@ -106,8 +115,10 @@\n name: `str`\n Name of the file provided in the `require` decorator.\n uri: `str`\n- URI of the file which replaces original file. Scheme should be\n- one of ``http``, ``https``, ``ftp`` or ``file``.\n+ URI of the file which replaces original file. Scheme should be one\n+ of ``http``, ``https``, ``ftp`` or ``file``. If no scheme is given\n+ the uri will be interpreted as a local path. i.e.\n+ ``file:///tmp/test`` and ``/tmp/test`` are the same.\n sha_hash: `str`, optional\n SHA256 hash of the file to compared to after downloading.\n \"\"\"\n", "issue": "sunpy.data.manager does not allow for local path object in manager. override_file()\n<!-- This comments are hidden when you submit the issue so you do not need to remove them!\r\nPlease be sure to check out our contributing guidelines: https://github.com/sunpy/sunpy/blob/master/CONTRIBUTING.rst\r\nPlease be sure to check out our code of conduct:\r\nhttps://github.com/sunpy/sunpy/blob/master/CODE_OF_CONDUCT.rst -->\r\n\r\n<!-- Please have a search on our GitHub repository to see if a similar issue has already been posted.\r\nIf a similar issue is closed, have a quick look to see if you are satisfied by the resolution.\r\nIf not please go ahead and open an issue! 
-->\r\n\r\n### Description\r\nIt would be great if `sunpy.data.manager` could take a local file path\r\n\r\n\r\n\n", "before_files": [{"content": "from typing import Dict\nimport pathlib\nimport functools\nfrom contextlib import contextmanager\nimport warnings\n\nfrom sunpy.util.util import hash_file\nfrom sunpy.util.exceptions import SunpyUserWarning\n\n__all__ = ['DataManager']\n\n\nclass DataManager:\n \"\"\"\n This class provides a remote data manager for managing remote files.\n\n Parameters\n ----------\n cache: `sunpy.data.data_manager.cache.Cache`\n Cache object to be used by `~sunpy.data.data_manager.manager.DataManager`.\n \"\"\"\n\n def __init__(self, cache):\n self._cache = cache\n\n self._file_cache = {}\n\n self._skip_hash_check = False\n self._skip_file: Dict[str, str] = {}\n\n def require(self, name, urls, sha_hash):\n \"\"\"\n Decorator for informing the data manager about the requirement of\n a file by a function.\n\n Parameters\n ----------\n name: `str`\n The name to reference the file with.\n urls: `list` or `str`\n A list of urls to download the file from.\n sha_hash: `str`\n SHA-1 hash of file.\n \"\"\"\n if isinstance(urls, str):\n urls = [urls]\n\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n replace = self._skip_file.get(name, None)\n if replace:\n if replace['uri'].startswith('file://'):\n file_path = replace['uri'][len('file://'):]\n file_hash = hash_file(file_path)\n else:\n file_path, file_hash, _ = self._cache._download_and_hash([replace['uri']])\n if replace['hash'] and file_hash != replace['hash']:\n # if hash provided to replace function doesn't match the hash of the file\n # raise error\n raise ValueError(\n \"Hash provided to override_file does not match hash of the file.\")\n elif self._skip_hash_check:\n file_path = self._cache.download(urls, redownload=True)\n else:\n details = self._cache.get_by_hash(sha_hash)\n if not details:\n # In case we are matching by hash and file does not exist\n # That might mean the wrong hash is supplied to decorator\n # We match by urls to make sure that is not the case\n if self._cache_has_file(urls):\n raise ValueError(\" Hash provided does not match the hash in database.\")\n file_path = self._cache.download(urls)\n if hash_file(file_path) != sha_hash:\n # the hash of the file downloaded does not match provided hash\n # this means the file has changed on the server.\n # the function should be updated to use the new hash. Raise an error to notify.\n raise RuntimeError(\n \"Remote file on the server has changed. 
Update hash of the function.\")\n else:\n # This is to handle the case when the local file appears to be tampered/corrupted\n if hash_file(details['file_path']) != details['file_hash']:\n warnings.warn(\"Hashes do not match, the file will be redownloaded (could be be tampered/corrupted)\",\n SunpyUserWarning)\n file_path = self._cache.download(urls, redownload=True)\n # Recheck the hash again, if this fails, we will exit.\n if hash_file(file_path) != details['file_hash']:\n raise RuntimeError(\"Redownloaded file also has the incorrect hash.\"\n \"The remote file on the server might have changed.\")\n else:\n file_path = details['file_path']\n\n self._file_cache[name] = file_path\n return func(*args, **kwargs)\n return wrapper\n\n return decorator\n\n @contextmanager\n def override_file(self, name, uri, sha_hash=None):\n \"\"\"\n Replaces the file by the name with the file provided by the url/path.\n\n Parameters\n ----------\n name: `str`\n Name of the file provided in the `require` decorator.\n uri: `str`\n URI of the file which replaces original file. Scheme should be\n one of ``http``, ``https``, ``ftp`` or ``file``.\n sha_hash: `str`, optional\n SHA256 hash of the file to compared to after downloading.\n \"\"\"\n try:\n self._skip_file[name] = {\n 'uri': uri,\n 'hash': sha_hash,\n }\n yield\n finally:\n _ = self._skip_file.pop(name, None)\n\n @contextmanager\n def skip_hash_check(self):\n \"\"\"\n Disables hash checking temporarily\n\n Examples\n --------\n >>> with remote_data_manager.skip_hash_check(): # doctest: +SKIP\n ... myfunction() # doctest: +SKIP\n \"\"\"\n try:\n self._skip_hash_check = True\n yield\n finally:\n self._skip_hash_check = False\n\n def get(self, name):\n \"\"\"\n Get the file by name.\n\n Parameters\n ----------\n name: `str`\n Name of the file given to the data manager, same as the one provided\n in `~sunpy.data.data_manager.manager.DataManager.require`.\n\n Returns\n -------\n `pathlib.Path`\n Path of the file.\n\n Raises\n ------\n `KeyError`\n If ``name`` is not in the cache.\n \"\"\"\n return pathlib.Path(self._file_cache[name])\n\n def _cache_has_file(self, urls):\n for url in urls:\n if self._cache._get_by_url(url):\n return True\n return False\n", "path": "sunpy/data/data_manager/manager.py"}], "after_files": [{"content": "import pathlib\nimport warnings\nimport functools\nfrom typing import Dict\nfrom contextlib import contextmanager\nfrom urllib.parse import urlparse\n\nfrom sunpy.util.exceptions import SunpyUserWarning\nfrom sunpy.util.util import hash_file\n\n__all__ = ['DataManager']\n\n\nclass DataManager:\n \"\"\"\n This class provides a remote data manager for managing remote files.\n\n Parameters\n ----------\n cache: `sunpy.data.data_manager.cache.Cache`\n Cache object to be used by `~sunpy.data.data_manager.manager.DataManager`.\n \"\"\"\n\n def __init__(self, cache):\n self._cache = cache\n\n self._file_cache = {}\n\n self._skip_hash_check = False\n self._skip_file: Dict[str, str] = {}\n\n def require(self, name, urls, sha_hash):\n \"\"\"\n Decorator for informing the data manager about the requirement of\n a file by a function.\n\n Parameters\n ----------\n name: `str`\n The name to reference the file with.\n urls: `list` or `str`\n A list of urls to download the file from.\n sha_hash: `str`\n SHA-1 hash of file.\n \"\"\"\n if isinstance(urls, str):\n urls = [urls]\n\n def decorator(func):\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n replace = self._skip_file.get(name, None)\n if replace:\n uri_parse = 
urlparse(replace['uri'])\n if uri_parse.scheme in (\"\", \"file\"):\n # If a relative file uri is specified (i.e.\n # `file://sunpy/test`) this maintains compatibility\n # with the original behaviour where this would be\n # interpreted as `./sunpy/test` if no scheme is\n # specified netloc will be '' by default.\n file_path = uri_parse.netloc + uri_parse.path\n file_hash = hash_file(file_path)\n else:\n file_path, file_hash, _ = self._cache._download_and_hash([replace['uri']])\n if replace['hash'] and file_hash != replace['hash']:\n # if hash provided to replace function doesn't match the hash of the file\n # raise error\n raise ValueError(\n \"Hash provided to override_file does not match hash of the file.\")\n elif self._skip_hash_check:\n file_path = self._cache.download(urls, redownload=True)\n else:\n details = self._cache.get_by_hash(sha_hash)\n if not details:\n # In case we are matching by hash and file does not exist\n # That might mean the wrong hash is supplied to decorator\n # We match by urls to make sure that is not the case\n if self._cache_has_file(urls):\n raise ValueError(\" Hash provided does not match the hash in database.\")\n file_path = self._cache.download(urls)\n if hash_file(file_path) != sha_hash:\n # the hash of the file downloaded does not match provided hash\n # this means the file has changed on the server.\n # the function should be updated to use the new\n # hash. Raise an error to notify.\n raise RuntimeError(\n \"Remote file on the server has changed. Update hash of the function.\")\n else:\n # This is to handle the case when the local file\n # appears to be tampered/corrupted\n if hash_file(details['file_path']) != details['file_hash']:\n warnings.warn(\"Hashes do not match, the file will be redownloaded (could be be tampered/corrupted)\",\n SunpyUserWarning)\n file_path = self._cache.download(urls, redownload=True)\n # Recheck the hash again, if this fails, we will exit.\n if hash_file(file_path) != details['file_hash']:\n raise RuntimeError(\"Redownloaded file also has the incorrect hash.\"\n \"The remote file on the server might have changed.\")\n else:\n file_path = details['file_path']\n\n self._file_cache[name] = file_path\n return func(*args, **kwargs)\n return wrapper\n\n return decorator\n\n @contextmanager\n def override_file(self, name, uri, sha_hash=None):\n \"\"\"\n Replaces the file by the name with the file provided by the url/path.\n\n Parameters\n ----------\n name: `str`\n Name of the file provided in the `require` decorator.\n uri: `str`\n URI of the file which replaces original file. Scheme should be one\n of ``http``, ``https``, ``ftp`` or ``file``. If no scheme is given\n the uri will be interpreted as a local path. i.e.\n ``file:///tmp/test`` and ``/tmp/test`` are the same.\n sha_hash: `str`, optional\n SHA256 hash of the file to compared to after downloading.\n \"\"\"\n try:\n self._skip_file[name] = {\n 'uri': uri,\n 'hash': sha_hash,\n }\n yield\n finally:\n _ = self._skip_file.pop(name, None)\n\n @contextmanager\n def skip_hash_check(self):\n \"\"\"\n Disables hash checking temporarily\n\n Examples\n --------\n >>> with remote_data_manager.skip_hash_check(): # doctest: +SKIP\n ... 
myfunction() # doctest: +SKIP\n \"\"\"\n try:\n self._skip_hash_check = True\n yield\n finally:\n self._skip_hash_check = False\n\n def get(self, name):\n \"\"\"\n Get the file by name.\n\n Parameters\n ----------\n name: `str`\n Name of the file given to the data manager, same as the one provided\n in `~sunpy.data.data_manager.manager.DataManager.require`.\n\n Returns\n -------\n `pathlib.Path`\n Path of the file.\n\n Raises\n ------\n `KeyError`\n If ``name`` is not in the cache.\n \"\"\"\n return pathlib.Path(self._file_cache[name])\n\n def _cache_has_file(self, urls):\n for url in urls:\n if self._cache._get_by_url(url):\n return True\n return False\n", "path": "sunpy/data/data_manager/manager.py"}]}
| 2,028 | 724 |
gh_patches_debug_57933
|
rasdani/github-patches
|
git_diff
|
scrapy__scrapy-3668
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
about the signal retry_complete
I didn't find the signal in the signal list. How can I use it?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `scrapy/downloadermiddlewares/retry.py`
Content:
```
1 """
2 An extension to retry failed requests that are potentially caused by temporary
3 problems such as a connection timeout or HTTP 500 error.
4
5 You can change the behaviour of this middleware by modifing the scraping settings:
6 RETRY_TIMES - how many times to retry a failed page
7 RETRY_HTTP_CODES - which HTTP response codes to retry
8
9 Failed pages are collected on the scraping process and rescheduled at the end,
10 once the spider has finished crawling all regular (non failed) pages. Once
11 there is no more failed pages to retry this middleware sends a signal
12 (retry_complete), so other extensions could connect to that signal.
13 """
14 import logging
15
16 from twisted.internet import defer
17 from twisted.internet.error import TimeoutError, DNSLookupError, \
18 ConnectionRefusedError, ConnectionDone, ConnectError, \
19 ConnectionLost, TCPTimedOutError
20 from twisted.web.client import ResponseFailed
21
22 from scrapy.exceptions import NotConfigured
23 from scrapy.utils.response import response_status_message
24 from scrapy.core.downloader.handlers.http11 import TunnelError
25 from scrapy.utils.python import global_object_name
26
27 logger = logging.getLogger(__name__)
28
29
30 class RetryMiddleware(object):
31
32 # IOError is raised by the HttpCompression middleware when trying to
33 # decompress an empty response
34 EXCEPTIONS_TO_RETRY = (defer.TimeoutError, TimeoutError, DNSLookupError,
35 ConnectionRefusedError, ConnectionDone, ConnectError,
36 ConnectionLost, TCPTimedOutError, ResponseFailed,
37 IOError, TunnelError)
38
39 def __init__(self, settings):
40 if not settings.getbool('RETRY_ENABLED'):
41 raise NotConfigured
42 self.max_retry_times = settings.getint('RETRY_TIMES')
43 self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES'))
44 self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')
45
46 @classmethod
47 def from_crawler(cls, crawler):
48 return cls(crawler.settings)
49
50 def process_response(self, request, response, spider):
51 if request.meta.get('dont_retry', False):
52 return response
53 if response.status in self.retry_http_codes:
54 reason = response_status_message(response.status)
55 return self._retry(request, reason, spider) or response
56 return response
57
58 def process_exception(self, request, exception, spider):
59 if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \
60 and not request.meta.get('dont_retry', False):
61 return self._retry(request, exception, spider)
62
63 def _retry(self, request, reason, spider):
64 retries = request.meta.get('retry_times', 0) + 1
65
66 retry_times = self.max_retry_times
67
68 if 'max_retry_times' in request.meta:
69 retry_times = request.meta['max_retry_times']
70
71 stats = spider.crawler.stats
72 if retries <= retry_times:
73 logger.debug("Retrying %(request)s (failed %(retries)d times): %(reason)s",
74 {'request': request, 'retries': retries, 'reason': reason},
75 extra={'spider': spider})
76 retryreq = request.copy()
77 retryreq.meta['retry_times'] = retries
78 retryreq.dont_filter = True
79 retryreq.priority = request.priority + self.priority_adjust
80
81 if isinstance(reason, Exception):
82 reason = global_object_name(reason.__class__)
83
84 stats.inc_value('retry/count')
85 stats.inc_value('retry/reason_count/%s' % reason)
86 return retryreq
87 else:
88 stats.inc_value('retry/max_reached')
89 logger.debug("Gave up retrying %(request)s (failed %(retries)d times): %(reason)s",
90 {'request': request, 'retries': retries, 'reason': reason},
91 extra={'spider': spider})
92
```
--- END FILES ---
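For context, the middleware listed above is driven entirely by settings and request meta keys that appear in its code; a typical configuration looks like this (the values are illustrative, not recommendations):
```python
# settings.py -- values are illustrative only
RETRY_ENABLED = True
RETRY_TIMES = 3                               # read in RetryMiddleware.__init__
RETRY_HTTP_CODES = [500, 502, 503, 504, 408]
RETRY_PRIORITY_ADJUST = -1

# Per-request overrides via Request.meta, as read in process_response()/_retry():
# Request(url, meta={'max_retry_times': 5})   # raise the cap for one request
# Request(url, meta={'dont_retry': True})     # opt a request out of retries
```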
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/scrapy/downloadermiddlewares/retry.py b/scrapy/downloadermiddlewares/retry.py
--- a/scrapy/downloadermiddlewares/retry.py
+++ b/scrapy/downloadermiddlewares/retry.py
@@ -7,9 +7,7 @@
RETRY_HTTP_CODES - which HTTP response codes to retry
Failed pages are collected on the scraping process and rescheduled at the end,
-once the spider has finished crawling all regular (non failed) pages. Once
-there is no more failed pages to retry this middleware sends a signal
-(retry_complete), so other extensions could connect to that signal.
+once the spider has finished crawling all regular (non failed) pages.
"""
import logging
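The docstring is trimmed because the middleware never actually sends a `retry_complete` signal. If similar end-of-crawl information is needed, one workable pattern (a sketch, not part of Scrapy or of this patch) is to read the retry stats that the middleware does record once the spider closes:
```python
# Sketch: log retry statistics at shutdown instead of waiting for a
# non-existent retry_complete signal. Register the class under EXTENSIONS.
from scrapy import signals

class RetryStatsLogger:
    def __init__(self, stats):
        self.stats = stats

    @classmethod
    def from_crawler(cls, crawler):
        ext = cls(crawler.stats)
        crawler.signals.connect(ext.spider_closed, signal=signals.spider_closed)
        return ext

    def spider_closed(self, spider):
        # 'retry/count' and 'retry/max_reached' are set by RetryMiddleware above.
        spider.logger.info(
            "retries performed: %s, given up: %s",
            self.stats.get_value("retry/count", 0),
            self.stats.get_value("retry/max_reached", 0),
        )
```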
|
{"golden_diff": "diff --git a/scrapy/downloadermiddlewares/retry.py b/scrapy/downloadermiddlewares/retry.py\n--- a/scrapy/downloadermiddlewares/retry.py\n+++ b/scrapy/downloadermiddlewares/retry.py\n@@ -7,9 +7,7 @@\n RETRY_HTTP_CODES - which HTTP response codes to retry\n \n Failed pages are collected on the scraping process and rescheduled at the end,\n-once the spider has finished crawling all regular (non failed) pages. Once\n-there is no more failed pages to retry this middleware sends a signal\n-(retry_complete), so other extensions could connect to that signal.\n+once the spider has finished crawling all regular (non failed) pages.\n \"\"\"\n import logging\n", "issue": "about the signal retry_complete\nI didn't find the singnal in the singnal list,how can I use it\n", "before_files": [{"content": "\"\"\"\nAn extension to retry failed requests that are potentially caused by temporary\nproblems such as a connection timeout or HTTP 500 error.\n\nYou can change the behaviour of this middleware by modifing the scraping settings:\nRETRY_TIMES - how many times to retry a failed page\nRETRY_HTTP_CODES - which HTTP response codes to retry\n\nFailed pages are collected on the scraping process and rescheduled at the end,\nonce the spider has finished crawling all regular (non failed) pages. Once\nthere is no more failed pages to retry this middleware sends a signal\n(retry_complete), so other extensions could connect to that signal.\n\"\"\"\nimport logging\n\nfrom twisted.internet import defer\nfrom twisted.internet.error import TimeoutError, DNSLookupError, \\\n ConnectionRefusedError, ConnectionDone, ConnectError, \\\n ConnectionLost, TCPTimedOutError\nfrom twisted.web.client import ResponseFailed\n\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.utils.response import response_status_message\nfrom scrapy.core.downloader.handlers.http11 import TunnelError\nfrom scrapy.utils.python import global_object_name\n\nlogger = logging.getLogger(__name__)\n\n\nclass RetryMiddleware(object):\n\n # IOError is raised by the HttpCompression middleware when trying to\n # decompress an empty response\n EXCEPTIONS_TO_RETRY = (defer.TimeoutError, TimeoutError, DNSLookupError,\n ConnectionRefusedError, ConnectionDone, ConnectError,\n ConnectionLost, TCPTimedOutError, ResponseFailed,\n IOError, TunnelError)\n\n def __init__(self, settings):\n if not settings.getbool('RETRY_ENABLED'):\n raise NotConfigured\n self.max_retry_times = settings.getint('RETRY_TIMES')\n self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES'))\n self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.settings)\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_retry', False):\n return response\n if response.status in self.retry_http_codes:\n reason = response_status_message(response.status)\n return self._retry(request, reason, spider) or response\n return response\n\n def process_exception(self, request, exception, spider):\n if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \\\n and not request.meta.get('dont_retry', False):\n return self._retry(request, exception, spider)\n\n def _retry(self, request, reason, spider):\n retries = request.meta.get('retry_times', 0) + 1\n\n retry_times = self.max_retry_times\n\n if 'max_retry_times' in request.meta:\n retry_times = request.meta['max_retry_times']\n\n stats = spider.crawler.stats\n if retries <= retry_times:\n logger.debug(\"Retrying 
%(request)s (failed %(retries)d times): %(reason)s\",\n {'request': request, 'retries': retries, 'reason': reason},\n extra={'spider': spider})\n retryreq = request.copy()\n retryreq.meta['retry_times'] = retries\n retryreq.dont_filter = True\n retryreq.priority = request.priority + self.priority_adjust\n\n if isinstance(reason, Exception):\n reason = global_object_name(reason.__class__)\n\n stats.inc_value('retry/count')\n stats.inc_value('retry/reason_count/%s' % reason)\n return retryreq\n else:\n stats.inc_value('retry/max_reached')\n logger.debug(\"Gave up retrying %(request)s (failed %(retries)d times): %(reason)s\",\n {'request': request, 'retries': retries, 'reason': reason},\n extra={'spider': spider})\n", "path": "scrapy/downloadermiddlewares/retry.py"}], "after_files": [{"content": "\"\"\"\nAn extension to retry failed requests that are potentially caused by temporary\nproblems such as a connection timeout or HTTP 500 error.\n\nYou can change the behaviour of this middleware by modifing the scraping settings:\nRETRY_TIMES - how many times to retry a failed page\nRETRY_HTTP_CODES - which HTTP response codes to retry\n\nFailed pages are collected on the scraping process and rescheduled at the end,\nonce the spider has finished crawling all regular (non failed) pages.\n\"\"\"\nimport logging\n\nfrom twisted.internet import defer\nfrom twisted.internet.error import TimeoutError, DNSLookupError, \\\n ConnectionRefusedError, ConnectionDone, ConnectError, \\\n ConnectionLost, TCPTimedOutError\nfrom twisted.web.client import ResponseFailed\n\nfrom scrapy.exceptions import NotConfigured\nfrom scrapy.utils.response import response_status_message\nfrom scrapy.core.downloader.handlers.http11 import TunnelError\nfrom scrapy.utils.python import global_object_name\n\nlogger = logging.getLogger(__name__)\n\n\nclass RetryMiddleware(object):\n\n # IOError is raised by the HttpCompression middleware when trying to\n # decompress an empty response\n EXCEPTIONS_TO_RETRY = (defer.TimeoutError, TimeoutError, DNSLookupError,\n ConnectionRefusedError, ConnectionDone, ConnectError,\n ConnectionLost, TCPTimedOutError, ResponseFailed,\n IOError, TunnelError)\n\n def __init__(self, settings):\n if not settings.getbool('RETRY_ENABLED'):\n raise NotConfigured\n self.max_retry_times = settings.getint('RETRY_TIMES')\n self.retry_http_codes = set(int(x) for x in settings.getlist('RETRY_HTTP_CODES'))\n self.priority_adjust = settings.getint('RETRY_PRIORITY_ADJUST')\n\n @classmethod\n def from_crawler(cls, crawler):\n return cls(crawler.settings)\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_retry', False):\n return response\n if response.status in self.retry_http_codes:\n reason = response_status_message(response.status)\n return self._retry(request, reason, spider) or response\n return response\n\n def process_exception(self, request, exception, spider):\n if isinstance(exception, self.EXCEPTIONS_TO_RETRY) \\\n and not request.meta.get('dont_retry', False):\n return self._retry(request, exception, spider)\n\n def _retry(self, request, reason, spider):\n retries = request.meta.get('retry_times', 0) + 1\n\n retry_times = self.max_retry_times\n\n if 'max_retry_times' in request.meta:\n retry_times = request.meta['max_retry_times']\n\n stats = spider.crawler.stats\n if retries <= retry_times:\n logger.debug(\"Retrying %(request)s (failed %(retries)d times): %(reason)s\",\n {'request': request, 'retries': retries, 'reason': reason},\n extra={'spider': spider})\n retryreq = 
request.copy()\n retryreq.meta['retry_times'] = retries\n retryreq.dont_filter = True\n retryreq.priority = request.priority + self.priority_adjust\n\n if isinstance(reason, Exception):\n reason = global_object_name(reason.__class__)\n\n stats.inc_value('retry/count')\n stats.inc_value('retry/reason_count/%s' % reason)\n return retryreq\n else:\n stats.inc_value('retry/max_reached')\n logger.debug(\"Gave up retrying %(request)s (failed %(retries)d times): %(reason)s\",\n {'request': request, 'retries': retries, 'reason': reason},\n extra={'spider': spider})\n", "path": "scrapy/downloadermiddlewares/retry.py"}]}
| 1,259 | 145 |
gh_patches_debug_38420
|
rasdani/github-patches
|
git_diff
|
mlflow__mlflow-9290
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[DOC-FIX] No documentation body for mlflow.search_model_versions
### Willingness to contribute
No. I cannot contribute a documentation fix at this time.
### URL(s) with the issue
(https://mlflow.org/docs/latest/python_api/mlflow.html#mlflow.search_model_versions)
### Description of proposal (what needs changing)
There is no documentation body for mlflow. search_model_versions() unlike for mlflow.mlflow.search_registered_models().
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mlflow/tracking/_model_registry/fluent.py`
Content:
```
1 from mlflow.tracking.client import MlflowClient
2 from mlflow.exceptions import MlflowException
3 from mlflow.entities.model_registry import ModelVersion
4 from mlflow.entities.model_registry import RegisteredModel
5 from mlflow.protos.databricks_pb2 import RESOURCE_ALREADY_EXISTS, ALREADY_EXISTS, ErrorCode
6 from mlflow.store.artifact.runs_artifact_repo import RunsArtifactRepository
7 from mlflow.utils.logging_utils import eprint
8 from mlflow.utils import get_results_from_paginated_fn
9 from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS
10 from mlflow.store.model_registry import (
11 SEARCH_REGISTERED_MODEL_MAX_RESULTS_DEFAULT,
12 SEARCH_MODEL_VERSION_MAX_RESULTS_DEFAULT,
13 )
14 from typing import Any, Dict, Optional, List
15
16
17 def register_model(
18 model_uri,
19 name,
20 await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
21 *,
22 tags: Optional[Dict[str, Any]] = None,
23 ) -> ModelVersion:
24 """
25 Create a new model version in model registry for the model files specified by ``model_uri``.
26 Note that this method assumes the model registry backend URI is the same as that of the
27 tracking backend.
28
29 :param model_uri: URI referring to the MLmodel directory. Use a ``runs:/`` URI if you want to
30 record the run ID with the model in model registry. ``models:/`` URIs are
31 currently not supported.
32 :param name: Name of the registered model under which to create a new model version. If a
33 registered model with the given name does not exist, it will be created
34 automatically.
35 :param await_registration_for: Number of seconds to wait for the model version to finish
36 being created and is in ``READY`` status. By default, the function
37 waits for five minutes. Specify 0 or None to skip waiting.
38 :param tags: A dictionary of key-value pairs that are converted into
39 :py:class:`mlflow.entities.model_registry.ModelVersionTag` objects.
40 :return: Single :py:class:`mlflow.entities.model_registry.ModelVersion` object created by
41 backend.
42
43 .. test-code-block:: python
44 :caption: Example
45
46 import mlflow.sklearn
47 from mlflow.models import infer_signature
48 from sklearn.datasets import make_regression
49 from sklearn.ensemble import RandomForestRegressor
50
51 mlflow.set_tracking_uri("sqlite:////tmp/mlruns.db")
52 params = {"n_estimators": 3, "random_state": 42}
53 X, y = make_regression(n_features=4, n_informative=2, random_state=0, shuffle=False)
54
55 # Log MLflow entities
56 with mlflow.start_run() as run:
57 rfr = RandomForestRegressor(**params).fit(X, y)
58 signature = infer_signature(X, rfr.predict(X))
59 mlflow.log_params(params)
60 mlflow.sklearn.log_model(rfr, artifact_path="sklearn-model", signature=signature)
61
62 model_uri = "runs:/{}/sklearn-model".format(run.info.run_id)
63 mv = mlflow.register_model(model_uri, "RandomForestRegressionModel")
64 print("Name: {}".format(mv.name))
65 print("Version: {}".format(mv.version))
66
67 .. code-block:: text
68 :caption: Output
69
70 Name: RandomForestRegressionModel
71 Version: 1
72 """
73 return _register_model(
74 model_uri=model_uri, name=name, await_registration_for=await_registration_for, tags=tags
75 )
76
77
78 def _register_model(
79 model_uri,
80 name,
81 await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,
82 *,
83 tags: Optional[Dict[str, Any]] = None,
84 local_model_path=None,
85 ) -> ModelVersion:
86 client = MlflowClient()
87 try:
88 create_model_response = client.create_registered_model(name)
89 eprint(f"Successfully registered model '{create_model_response.name}'.")
90 except MlflowException as e:
91 if e.error_code in (
92 ErrorCode.Name(RESOURCE_ALREADY_EXISTS),
93 ErrorCode.Name(ALREADY_EXISTS),
94 ):
95 eprint(
96 "Registered model '%s' already exists. Creating a new version of this model..."
97 % name
98 )
99 else:
100 raise e
101
102 run_id = None
103 source = model_uri
104 if RunsArtifactRepository.is_runs_uri(model_uri):
105 source = RunsArtifactRepository.get_underlying_uri(model_uri)
106 (run_id, _) = RunsArtifactRepository.parse_runs_uri(model_uri)
107
108 create_version_response = client._create_model_version(
109 name=name,
110 source=source,
111 run_id=run_id,
112 tags=tags,
113 await_creation_for=await_registration_for,
114 local_model_path=local_model_path,
115 )
116 eprint(
117 f"Created version '{create_version_response.version}' of model "
118 f"'{create_version_response.name}'."
119 )
120 return create_version_response
121
122
123 def search_registered_models(
124 max_results: Optional[int] = None,
125 filter_string: Optional[str] = None,
126 order_by: Optional[List[str]] = None,
127 ) -> List[RegisteredModel]:
128 """
129 Search for registered models that satisfy the filter criteria.
130
131 :param filter_string: Filter query string
132 (e.g., ``"name = 'a_model_name' and tag.key = 'value1'"``),
133 defaults to searching for all registered models. The following identifiers, comparators,
134 and logical operators are supported.
135
136 Identifiers
137 - ``name``: registered model name.
138 - ``tags.<tag_key>``: registered model tag. If ``tag_key`` contains spaces, it must be
139 wrapped with backticks (e.g., ``"tags.`extra key`"``).
140
141 Comparators
142 - ``=``: Equal to.
143 - ``!=``: Not equal to.
144 - ``LIKE``: Case-sensitive pattern match.
145 - ``ILIKE``: Case-insensitive pattern match.
146
147 Logical operators
148 - ``AND``: Combines two sub-queries and returns True if both of them are True.
149
150 :param max_results: If passed, specifies the maximum number of models desired. If not
151 passed, all models will be returned.
152 :param order_by: List of column names with ASC|DESC annotation, to be used for ordering
153 matching search results.
154 :return: A list of :py:class:`mlflow.entities.model_registry.RegisteredModel` objects
155 that satisfy the search expressions.
156
157 .. test-code-block:: python
158 :caption: Example
159
160 import mlflow
161 from sklearn.linear_model import LogisticRegression
162
163 with mlflow.start_run():
164 mlflow.sklearn.log_model(
165 LogisticRegression(),
166 "Cordoba",
167 registered_model_name="CordobaWeatherForecastModel",
168 )
169 mlflow.sklearn.log_model(
170 LogisticRegression(),
171 "Boston",
172 registered_model_name="BostonWeatherForecastModel",
173 )
174
175 # Get search results filtered by the registered model name
176 filter_string = "name = 'CordobaWeatherForecastModel'"
177 results = mlflow.search_registered_models(filter_string=filter_string)
178 print("-" * 80)
179 for res in results:
180 for mv in res.latest_versions:
181 print("name={}; run_id={}; version={}".format(mv.name, mv.run_id, mv.version))
182
183 # Get search results filtered by the registered model name that matches
184 # prefix pattern
185 filter_string = "name LIKE 'Boston%'"
186 results = mlflow.search_registered_models(filter_string=filter_string)
187 print("-" * 80)
188 for res in results:
189 for mv in res.latest_versions:
190 print("name={}; run_id={}; version={}".format(mv.name, mv.run_id, mv.version))
191
192 # Get all registered models and order them by ascending order of the names
193 results = mlflow.search_registered_models(order_by=["name ASC"])
194 print("-" * 80)
195 for res in results:
196 for mv in res.latest_versions:
197 print("name={}; run_id={}; version={}".format(mv.name, mv.run_id, mv.version))
198
199 .. code-block:: text
200 :caption: Output
201
202 --------------------------------------------------------------------------------
203 name=CordobaWeatherForecastModel; run_id=248c66a666744b4887bdeb2f9cf7f1c6; version=1
204 --------------------------------------------------------------------------------
205 name=BostonWeatherForecastModel; run_id=248c66a666744b4887bdeb2f9cf7f1c6; version=1
206 --------------------------------------------------------------------------------
207 name=BostonWeatherForecastModel; run_id=248c66a666744b4887bdeb2f9cf7f1c6; version=1
208 name=CordobaWeatherForecastModel; run_id=248c66a666744b4887bdeb2f9cf7f1c6; version=1
209
210 """
211
212 def pagination_wrapper_func(number_to_get, next_page_token):
213 return MlflowClient().search_registered_models(
214 max_results=number_to_get,
215 filter_string=filter_string,
216 order_by=order_by,
217 page_token=next_page_token,
218 )
219
220 return get_results_from_paginated_fn(
221 pagination_wrapper_func,
222 SEARCH_REGISTERED_MODEL_MAX_RESULTS_DEFAULT,
223 max_results,
224 )
225
226
227 def search_model_versions(
228 max_results: Optional[int] = None,
229 filter_string: Optional[str] = None,
230 order_by: Optional[List[str]] = None,
231 ) -> List[ModelVersion]:
232 def pagination_wrapper_func(number_to_get, next_page_token):
233 return MlflowClient().search_model_versions(
234 max_results=number_to_get,
235 filter_string=filter_string,
236 order_by=order_by,
237 page_token=next_page_token,
238 )
239
240 return get_results_from_paginated_fn(
241 paginated_fn=pagination_wrapper_func,
242 max_results_per_page=SEARCH_MODEL_VERSION_MAX_RESULTS_DEFAULT,
243 max_results=max_results,
244 )
245
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mlflow/tracking/_model_registry/fluent.py b/mlflow/tracking/_model_registry/fluent.py
--- a/mlflow/tracking/_model_registry/fluent.py
+++ b/mlflow/tracking/_model_registry/fluent.py
@@ -229,6 +229,77 @@
filter_string: Optional[str] = None,
order_by: Optional[List[str]] = None,
) -> List[ModelVersion]:
+ """
+ Search for model versions that satisfy the filter criteria.
+
+ :param filter_string: Filter query string
+ (e.g., ``"name = 'a_model_name' and tag.key = 'value1'"``),
+ defaults to searching for all model versions. The following identifiers, comparators,
+ and logical operators are supported.
+
+ Identifiers
+ - ``name``: model name.
+ - ``source_path``: model version source path.
+ - ``run_id``: The id of the mlflow run that generates the model version.
+ - ``tags.<tag_key>``: model version tag. If ``tag_key`` contains spaces, it must be
+ wrapped with backticks (e.g., ``"tags.`extra key`"``).
+
+ Comparators
+ - ``=``: Equal to.
+ - ``!=``: Not equal to.
+ - ``LIKE``: Case-sensitive pattern match.
+ - ``ILIKE``: Case-insensitive pattern match.
+ - ``IN``: In a value list. Only ``run_id`` identifier supports ``IN`` comparator.
+
+ Logical operators
+ - ``AND``: Combines two sub-queries and returns True if both of them are True.
+
+ :param max_results: If passed, specifies the maximum number of models desired. If not
+ passed, all models will be returned.
+ :param order_by: List of column names with ASC|DESC annotation, to be used for ordering
+ matching search results.
+ :return: A list of :py:class:`mlflow.entities.model_registry.ModelVersion` objects
+ that satisfy the search expressions.
+
+ .. test-code-block:: python
+ :caption: Example
+
+ import mlflow
+ from sklearn.linear_model import LogisticRegression
+
+ for _ in range(2):
+ with mlflow.start_run():
+ mlflow.sklearn.log_model(
+ LogisticRegression(),
+ "Cordoba",
+ registered_model_name="CordobaWeatherForecastModel",
+ )
+
+ # Get all versions of the model filtered by name
+ filter_string = "name = 'CordobaWeatherForecastModel'"
+ results = mlflow.search_model_versions(filter_string=filter_string)
+ print("-" * 80)
+ for res in results:
+ print("name={}; run_id={}; version={}".format(res.name, res.run_id, res.version))
+
+ # Get the version of the model filtered by run_id
+ filter_string = "run_id = 'ae9a606a12834c04a8ef1006d0cff779'"
+ results = mlflow.search_model_versions(filter_string=filter_string)
+ print("-" * 80)
+ for res in results:
+ print("name={}; run_id={}; version={}".format(res.name, res.run_id, res.version))
+
+ .. code-block:: text
+ :caption: Output
+
+ --------------------------------------------------------------------------------
+ name=CordobaWeatherForecastModel; run_id=ae9a606a12834c04a8ef1006d0cff779; version=2
+ name=CordobaWeatherForecastModel; run_id=d8f028b5fedf4faf8e458f7693dfa7ce; version=1
+ --------------------------------------------------------------------------------
+ name=CordobaWeatherForecastModel; run_id=ae9a606a12834c04a8ef1006d0cff779; version=2
+
+ """
+
def pagination_wrapper_func(number_to_get, next_page_token):
return MlflowClient().search_model_versions(
max_results=number_to_get,
|
{"golden_diff": "diff --git a/mlflow/tracking/_model_registry/fluent.py b/mlflow/tracking/_model_registry/fluent.py\n--- a/mlflow/tracking/_model_registry/fluent.py\n+++ b/mlflow/tracking/_model_registry/fluent.py\n@@ -229,6 +229,77 @@\n filter_string: Optional[str] = None,\n order_by: Optional[List[str]] = None,\n ) -> List[ModelVersion]:\n+ \"\"\"\n+ Search for model versions that satisfy the filter criteria.\n+\n+ :param filter_string: Filter query string\n+ (e.g., ``\"name = 'a_model_name' and tag.key = 'value1'\"``),\n+ defaults to searching for all model versions. The following identifiers, comparators,\n+ and logical operators are supported.\n+\n+ Identifiers\n+ - ``name``: model name.\n+ - ``source_path``: model version source path.\n+ - ``run_id``: The id of the mlflow run that generates the model version.\n+ - ``tags.<tag_key>``: model version tag. If ``tag_key`` contains spaces, it must be\n+ wrapped with backticks (e.g., ``\"tags.`extra key`\"``).\n+\n+ Comparators\n+ - ``=``: Equal to.\n+ - ``!=``: Not equal to.\n+ - ``LIKE``: Case-sensitive pattern match.\n+ - ``ILIKE``: Case-insensitive pattern match.\n+ - ``IN``: In a value list. Only ``run_id`` identifier supports ``IN`` comparator.\n+\n+ Logical operators\n+ - ``AND``: Combines two sub-queries and returns True if both of them are True.\n+\n+ :param max_results: If passed, specifies the maximum number of models desired. If not\n+ passed, all models will be returned.\n+ :param order_by: List of column names with ASC|DESC annotation, to be used for ordering\n+ matching search results.\n+ :return: A list of :py:class:`mlflow.entities.model_registry.ModelVersion` objects\n+ that satisfy the search expressions.\n+\n+ .. test-code-block:: python\n+ :caption: Example\n+\n+ import mlflow\n+ from sklearn.linear_model import LogisticRegression\n+\n+ for _ in range(2):\n+ with mlflow.start_run():\n+ mlflow.sklearn.log_model(\n+ LogisticRegression(),\n+ \"Cordoba\",\n+ registered_model_name=\"CordobaWeatherForecastModel\",\n+ )\n+\n+ # Get all versions of the model filtered by name\n+ filter_string = \"name = 'CordobaWeatherForecastModel'\"\n+ results = mlflow.search_model_versions(filter_string=filter_string)\n+ print(\"-\" * 80)\n+ for res in results:\n+ print(\"name={}; run_id={}; version={}\".format(res.name, res.run_id, res.version))\n+\n+ # Get the version of the model filtered by run_id\n+ filter_string = \"run_id = 'ae9a606a12834c04a8ef1006d0cff779'\"\n+ results = mlflow.search_model_versions(filter_string=filter_string)\n+ print(\"-\" * 80)\n+ for res in results:\n+ print(\"name={}; run_id={}; version={}\".format(res.name, res.run_id, res.version))\n+\n+ .. code-block:: text\n+ :caption: Output\n+\n+ --------------------------------------------------------------------------------\n+ name=CordobaWeatherForecastModel; run_id=ae9a606a12834c04a8ef1006d0cff779; version=2\n+ name=CordobaWeatherForecastModel; run_id=d8f028b5fedf4faf8e458f7693dfa7ce; version=1\n+ --------------------------------------------------------------------------------\n+ name=CordobaWeatherForecastModel; run_id=ae9a606a12834c04a8ef1006d0cff779; version=2\n+\n+ \"\"\"\n+\n def pagination_wrapper_func(number_to_get, next_page_token):\n return MlflowClient().search_model_versions(\n max_results=number_to_get,\n", "issue": "[DOC-FIX] No documentation body for mlflow.search_model_versions\n### Willingness to contribute\n\nNo. 
I cannot contribute a documentation fix at this time.\n\n### URL(s) with the issue\n\n(https://mlflow.org/docs/latest/python_api/mlflow.html#mlflow.search_model_versions)\n\n### Description of proposal (what needs changing)\n\nThere is no documentation body for mlflow. search_model_versions() unlike for mlflow.mlflow.search_registered_models().\n", "before_files": [{"content": "from mlflow.tracking.client import MlflowClient\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.entities.model_registry import ModelVersion\nfrom mlflow.entities.model_registry import RegisteredModel\nfrom mlflow.protos.databricks_pb2 import RESOURCE_ALREADY_EXISTS, ALREADY_EXISTS, ErrorCode\nfrom mlflow.store.artifact.runs_artifact_repo import RunsArtifactRepository\nfrom mlflow.utils.logging_utils import eprint\nfrom mlflow.utils import get_results_from_paginated_fn\nfrom mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS\nfrom mlflow.store.model_registry import (\n SEARCH_REGISTERED_MODEL_MAX_RESULTS_DEFAULT,\n SEARCH_MODEL_VERSION_MAX_RESULTS_DEFAULT,\n)\nfrom typing import Any, Dict, Optional, List\n\n\ndef register_model(\n model_uri,\n name,\n await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,\n *,\n tags: Optional[Dict[str, Any]] = None,\n) -> ModelVersion:\n \"\"\"\n Create a new model version in model registry for the model files specified by ``model_uri``.\n Note that this method assumes the model registry backend URI is the same as that of the\n tracking backend.\n\n :param model_uri: URI referring to the MLmodel directory. Use a ``runs:/`` URI if you want to\n record the run ID with the model in model registry. ``models:/`` URIs are\n currently not supported.\n :param name: Name of the registered model under which to create a new model version. If a\n registered model with the given name does not exist, it will be created\n automatically.\n :param await_registration_for: Number of seconds to wait for the model version to finish\n being created and is in ``READY`` status. By default, the function\n waits for five minutes. Specify 0 or None to skip waiting.\n :param tags: A dictionary of key-value pairs that are converted into\n :py:class:`mlflow.entities.model_registry.ModelVersionTag` objects.\n :return: Single :py:class:`mlflow.entities.model_registry.ModelVersion` object created by\n backend.\n\n .. test-code-block:: python\n :caption: Example\n\n import mlflow.sklearn\n from mlflow.models import infer_signature\n from sklearn.datasets import make_regression\n from sklearn.ensemble import RandomForestRegressor\n\n mlflow.set_tracking_uri(\"sqlite:////tmp/mlruns.db\")\n params = {\"n_estimators\": 3, \"random_state\": 42}\n X, y = make_regression(n_features=4, n_informative=2, random_state=0, shuffle=False)\n\n # Log MLflow entities\n with mlflow.start_run() as run:\n rfr = RandomForestRegressor(**params).fit(X, y)\n signature = infer_signature(X, rfr.predict(X))\n mlflow.log_params(params)\n mlflow.sklearn.log_model(rfr, artifact_path=\"sklearn-model\", signature=signature)\n\n model_uri = \"runs:/{}/sklearn-model\".format(run.info.run_id)\n mv = mlflow.register_model(model_uri, \"RandomForestRegressionModel\")\n print(\"Name: {}\".format(mv.name))\n print(\"Version: {}\".format(mv.version))\n\n .. 
code-block:: text\n :caption: Output\n\n Name: RandomForestRegressionModel\n Version: 1\n \"\"\"\n return _register_model(\n model_uri=model_uri, name=name, await_registration_for=await_registration_for, tags=tags\n )\n\n\ndef _register_model(\n model_uri,\n name,\n await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,\n *,\n tags: Optional[Dict[str, Any]] = None,\n local_model_path=None,\n) -> ModelVersion:\n client = MlflowClient()\n try:\n create_model_response = client.create_registered_model(name)\n eprint(f\"Successfully registered model '{create_model_response.name}'.\")\n except MlflowException as e:\n if e.error_code in (\n ErrorCode.Name(RESOURCE_ALREADY_EXISTS),\n ErrorCode.Name(ALREADY_EXISTS),\n ):\n eprint(\n \"Registered model '%s' already exists. Creating a new version of this model...\"\n % name\n )\n else:\n raise e\n\n run_id = None\n source = model_uri\n if RunsArtifactRepository.is_runs_uri(model_uri):\n source = RunsArtifactRepository.get_underlying_uri(model_uri)\n (run_id, _) = RunsArtifactRepository.parse_runs_uri(model_uri)\n\n create_version_response = client._create_model_version(\n name=name,\n source=source,\n run_id=run_id,\n tags=tags,\n await_creation_for=await_registration_for,\n local_model_path=local_model_path,\n )\n eprint(\n f\"Created version '{create_version_response.version}' of model \"\n f\"'{create_version_response.name}'.\"\n )\n return create_version_response\n\n\ndef search_registered_models(\n max_results: Optional[int] = None,\n filter_string: Optional[str] = None,\n order_by: Optional[List[str]] = None,\n) -> List[RegisteredModel]:\n \"\"\"\n Search for registered models that satisfy the filter criteria.\n\n :param filter_string: Filter query string\n (e.g., ``\"name = 'a_model_name' and tag.key = 'value1'\"``),\n defaults to searching for all registered models. The following identifiers, comparators,\n and logical operators are supported.\n\n Identifiers\n - ``name``: registered model name.\n - ``tags.<tag_key>``: registered model tag. If ``tag_key`` contains spaces, it must be\n wrapped with backticks (e.g., ``\"tags.`extra key`\"``).\n\n Comparators\n - ``=``: Equal to.\n - ``!=``: Not equal to.\n - ``LIKE``: Case-sensitive pattern match.\n - ``ILIKE``: Case-insensitive pattern match.\n\n Logical operators\n - ``AND``: Combines two sub-queries and returns True if both of them are True.\n\n :param max_results: If passed, specifies the maximum number of models desired. If not\n passed, all models will be returned.\n :param order_by: List of column names with ASC|DESC annotation, to be used for ordering\n matching search results.\n :return: A list of :py:class:`mlflow.entities.model_registry.RegisteredModel` objects\n that satisfy the search expressions.\n\n .. 
test-code-block:: python\n :caption: Example\n\n import mlflow\n from sklearn.linear_model import LogisticRegression\n\n with mlflow.start_run():\n mlflow.sklearn.log_model(\n LogisticRegression(),\n \"Cordoba\",\n registered_model_name=\"CordobaWeatherForecastModel\",\n )\n mlflow.sklearn.log_model(\n LogisticRegression(),\n \"Boston\",\n registered_model_name=\"BostonWeatherForecastModel\",\n )\n\n # Get search results filtered by the registered model name\n filter_string = \"name = 'CordobaWeatherForecastModel'\"\n results = mlflow.search_registered_models(filter_string=filter_string)\n print(\"-\" * 80)\n for res in results:\n for mv in res.latest_versions:\n print(\"name={}; run_id={}; version={}\".format(mv.name, mv.run_id, mv.version))\n\n # Get search results filtered by the registered model name that matches\n # prefix pattern\n filter_string = \"name LIKE 'Boston%'\"\n results = mlflow.search_registered_models(filter_string=filter_string)\n print(\"-\" * 80)\n for res in results:\n for mv in res.latest_versions:\n print(\"name={}; run_id={}; version={}\".format(mv.name, mv.run_id, mv.version))\n\n # Get all registered models and order them by ascending order of the names\n results = mlflow.search_registered_models(order_by=[\"name ASC\"])\n print(\"-\" * 80)\n for res in results:\n for mv in res.latest_versions:\n print(\"name={}; run_id={}; version={}\".format(mv.name, mv.run_id, mv.version))\n\n .. code-block:: text\n :caption: Output\n\n --------------------------------------------------------------------------------\n name=CordobaWeatherForecastModel; run_id=248c66a666744b4887bdeb2f9cf7f1c6; version=1\n --------------------------------------------------------------------------------\n name=BostonWeatherForecastModel; run_id=248c66a666744b4887bdeb2f9cf7f1c6; version=1\n --------------------------------------------------------------------------------\n name=BostonWeatherForecastModel; run_id=248c66a666744b4887bdeb2f9cf7f1c6; version=1\n name=CordobaWeatherForecastModel; run_id=248c66a666744b4887bdeb2f9cf7f1c6; version=1\n\n \"\"\"\n\n def pagination_wrapper_func(number_to_get, next_page_token):\n return MlflowClient().search_registered_models(\n max_results=number_to_get,\n filter_string=filter_string,\n order_by=order_by,\n page_token=next_page_token,\n )\n\n return get_results_from_paginated_fn(\n pagination_wrapper_func,\n SEARCH_REGISTERED_MODEL_MAX_RESULTS_DEFAULT,\n max_results,\n )\n\n\ndef search_model_versions(\n max_results: Optional[int] = None,\n filter_string: Optional[str] = None,\n order_by: Optional[List[str]] = None,\n) -> List[ModelVersion]:\n def pagination_wrapper_func(number_to_get, next_page_token):\n return MlflowClient().search_model_versions(\n max_results=number_to_get,\n filter_string=filter_string,\n order_by=order_by,\n page_token=next_page_token,\n )\n\n return get_results_from_paginated_fn(\n paginated_fn=pagination_wrapper_func,\n max_results_per_page=SEARCH_MODEL_VERSION_MAX_RESULTS_DEFAULT,\n max_results=max_results,\n )\n", "path": "mlflow/tracking/_model_registry/fluent.py"}], "after_files": [{"content": "from mlflow.tracking.client import MlflowClient\nfrom mlflow.exceptions import MlflowException\nfrom mlflow.entities.model_registry import ModelVersion\nfrom mlflow.entities.model_registry import RegisteredModel\nfrom mlflow.protos.databricks_pb2 import RESOURCE_ALREADY_EXISTS, ALREADY_EXISTS, ErrorCode\nfrom mlflow.store.artifact.runs_artifact_repo import RunsArtifactRepository\nfrom mlflow.utils.logging_utils import eprint\nfrom 
mlflow.utils import get_results_from_paginated_fn\nfrom mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS\nfrom mlflow.store.model_registry import (\n SEARCH_REGISTERED_MODEL_MAX_RESULTS_DEFAULT,\n SEARCH_MODEL_VERSION_MAX_RESULTS_DEFAULT,\n)\nfrom typing import Any, Dict, Optional, List\n\n\ndef register_model(\n model_uri,\n name,\n await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,\n *,\n tags: Optional[Dict[str, Any]] = None,\n) -> ModelVersion:\n \"\"\"\n Create a new model version in model registry for the model files specified by ``model_uri``.\n Note that this method assumes the model registry backend URI is the same as that of the\n tracking backend.\n\n :param model_uri: URI referring to the MLmodel directory. Use a ``runs:/`` URI if you want to\n record the run ID with the model in model registry. ``models:/`` URIs are\n currently not supported.\n :param name: Name of the registered model under which to create a new model version. If a\n registered model with the given name does not exist, it will be created\n automatically.\n :param await_registration_for: Number of seconds to wait for the model version to finish\n being created and is in ``READY`` status. By default, the function\n waits for five minutes. Specify 0 or None to skip waiting.\n :param tags: A dictionary of key-value pairs that are converted into\n :py:class:`mlflow.entities.model_registry.ModelVersionTag` objects.\n :return: Single :py:class:`mlflow.entities.model_registry.ModelVersion` object created by\n backend.\n\n .. test-code-block:: python\n :caption: Example\n\n import mlflow.sklearn\n from mlflow.models import infer_signature\n from sklearn.datasets import make_regression\n from sklearn.ensemble import RandomForestRegressor\n\n mlflow.set_tracking_uri(\"sqlite:////tmp/mlruns.db\")\n params = {\"n_estimators\": 3, \"random_state\": 42}\n X, y = make_regression(n_features=4, n_informative=2, random_state=0, shuffle=False)\n\n # Log MLflow entities\n with mlflow.start_run() as run:\n rfr = RandomForestRegressor(**params).fit(X, y)\n signature = infer_signature(X, rfr.predict(X))\n mlflow.log_params(params)\n mlflow.sklearn.log_model(rfr, artifact_path=\"sklearn-model\", signature=signature)\n\n model_uri = \"runs:/{}/sklearn-model\".format(run.info.run_id)\n mv = mlflow.register_model(model_uri, \"RandomForestRegressionModel\")\n print(\"Name: {}\".format(mv.name))\n print(\"Version: {}\".format(mv.version))\n\n .. code-block:: text\n :caption: Output\n\n Name: RandomForestRegressionModel\n Version: 1\n \"\"\"\n return _register_model(\n model_uri=model_uri, name=name, await_registration_for=await_registration_for, tags=tags\n )\n\n\ndef _register_model(\n model_uri,\n name,\n await_registration_for=DEFAULT_AWAIT_MAX_SLEEP_SECONDS,\n *,\n tags: Optional[Dict[str, Any]] = None,\n local_model_path=None,\n) -> ModelVersion:\n client = MlflowClient()\n try:\n create_model_response = client.create_registered_model(name)\n eprint(f\"Successfully registered model '{create_model_response.name}'.\")\n except MlflowException as e:\n if e.error_code in (\n ErrorCode.Name(RESOURCE_ALREADY_EXISTS),\n ErrorCode.Name(ALREADY_EXISTS),\n ):\n eprint(\n \"Registered model '%s' already exists. 
Creating a new version of this model...\"\n % name\n )\n else:\n raise e\n\n run_id = None\n source = model_uri\n if RunsArtifactRepository.is_runs_uri(model_uri):\n source = RunsArtifactRepository.get_underlying_uri(model_uri)\n (run_id, _) = RunsArtifactRepository.parse_runs_uri(model_uri)\n\n create_version_response = client._create_model_version(\n name=name,\n source=source,\n run_id=run_id,\n tags=tags,\n await_creation_for=await_registration_for,\n local_model_path=local_model_path,\n )\n eprint(\n f\"Created version '{create_version_response.version}' of model \"\n f\"'{create_version_response.name}'.\"\n )\n return create_version_response\n\n\ndef search_registered_models(\n max_results: Optional[int] = None,\n filter_string: Optional[str] = None,\n order_by: Optional[List[str]] = None,\n) -> List[RegisteredModel]:\n \"\"\"\n Search for registered models that satisfy the filter criteria.\n\n :param filter_string: Filter query string\n (e.g., ``\"name = 'a_model_name' and tag.key = 'value1'\"``),\n defaults to searching for all registered models. The following identifiers, comparators,\n and logical operators are supported.\n\n Identifiers\n - ``name``: registered model name.\n - ``tags.<tag_key>``: registered model tag. If ``tag_key`` contains spaces, it must be\n wrapped with backticks (e.g., ``\"tags.`extra key`\"``).\n\n Comparators\n - ``=``: Equal to.\n - ``!=``: Not equal to.\n - ``LIKE``: Case-sensitive pattern match.\n - ``ILIKE``: Case-insensitive pattern match.\n\n Logical operators\n - ``AND``: Combines two sub-queries and returns True if both of them are True.\n\n :param max_results: If passed, specifies the maximum number of models desired. If not\n passed, all models will be returned.\n :param order_by: List of column names with ASC|DESC annotation, to be used for ordering\n matching search results.\n :return: A list of :py:class:`mlflow.entities.model_registry.RegisteredModel` objects\n that satisfy the search expressions.\n\n .. test-code-block:: python\n :caption: Example\n\n import mlflow\n from sklearn.linear_model import LogisticRegression\n\n with mlflow.start_run():\n mlflow.sklearn.log_model(\n LogisticRegression(),\n \"Cordoba\",\n registered_model_name=\"CordobaWeatherForecastModel\",\n )\n mlflow.sklearn.log_model(\n LogisticRegression(),\n \"Boston\",\n registered_model_name=\"BostonWeatherForecastModel\",\n )\n\n # Get search results filtered by the registered model name\n filter_string = \"name = 'CordobaWeatherForecastModel'\"\n results = mlflow.search_registered_models(filter_string=filter_string)\n print(\"-\" * 80)\n for res in results:\n for mv in res.latest_versions:\n print(\"name={}; run_id={}; version={}\".format(mv.name, mv.run_id, mv.version))\n\n # Get search results filtered by the registered model name that matches\n # prefix pattern\n filter_string = \"name LIKE 'Boston%'\"\n results = mlflow.search_registered_models(filter_string=filter_string)\n print(\"-\" * 80)\n for res in results:\n for mv in res.latest_versions:\n print(\"name={}; run_id={}; version={}\".format(mv.name, mv.run_id, mv.version))\n\n # Get all registered models and order them by ascending order of the names\n results = mlflow.search_registered_models(order_by=[\"name ASC\"])\n print(\"-\" * 80)\n for res in results:\n for mv in res.latest_versions:\n print(\"name={}; run_id={}; version={}\".format(mv.name, mv.run_id, mv.version))\n\n .. 
code-block:: text\n :caption: Output\n\n --------------------------------------------------------------------------------\n name=CordobaWeatherForecastModel; run_id=248c66a666744b4887bdeb2f9cf7f1c6; version=1\n --------------------------------------------------------------------------------\n name=BostonWeatherForecastModel; run_id=248c66a666744b4887bdeb2f9cf7f1c6; version=1\n --------------------------------------------------------------------------------\n name=BostonWeatherForecastModel; run_id=248c66a666744b4887bdeb2f9cf7f1c6; version=1\n name=CordobaWeatherForecastModel; run_id=248c66a666744b4887bdeb2f9cf7f1c6; version=1\n\n \"\"\"\n\n def pagination_wrapper_func(number_to_get, next_page_token):\n return MlflowClient().search_registered_models(\n max_results=number_to_get,\n filter_string=filter_string,\n order_by=order_by,\n page_token=next_page_token,\n )\n\n return get_results_from_paginated_fn(\n pagination_wrapper_func,\n SEARCH_REGISTERED_MODEL_MAX_RESULTS_DEFAULT,\n max_results,\n )\n\n\ndef search_model_versions(\n max_results: Optional[int] = None,\n filter_string: Optional[str] = None,\n order_by: Optional[List[str]] = None,\n) -> List[ModelVersion]:\n \"\"\"\n Search for model versions that satisfy the filter criteria.\n\n :param filter_string: Filter query string\n (e.g., ``\"name = 'a_model_name' and tag.key = 'value1'\"``),\n defaults to searching for all model versions. The following identifiers, comparators,\n and logical operators are supported.\n\n Identifiers\n - ``name``: model name.\n - ``source_path``: model version source path.\n - ``run_id``: The id of the mlflow run that generates the model version.\n - ``tags.<tag_key>``: model version tag. If ``tag_key`` contains spaces, it must be\n wrapped with backticks (e.g., ``\"tags.`extra key`\"``).\n\n Comparators\n - ``=``: Equal to.\n - ``!=``: Not equal to.\n - ``LIKE``: Case-sensitive pattern match.\n - ``ILIKE``: Case-insensitive pattern match.\n - ``IN``: In a value list. Only ``run_id`` identifier supports ``IN`` comparator.\n\n Logical operators\n - ``AND``: Combines two sub-queries and returns True if both of them are True.\n\n :param max_results: If passed, specifies the maximum number of models desired. If not\n passed, all models will be returned.\n :param order_by: List of column names with ASC|DESC annotation, to be used for ordering\n matching search results.\n :return: A list of :py:class:`mlflow.entities.model_registry.ModelVersion` objects\n that satisfy the search expressions.\n\n .. test-code-block:: python\n :caption: Example\n\n import mlflow\n from sklearn.linear_model import LogisticRegression\n\n for _ in range(2):\n with mlflow.start_run():\n mlflow.sklearn.log_model(\n LogisticRegression(),\n \"Cordoba\",\n registered_model_name=\"CordobaWeatherForecastModel\",\n )\n\n # Get all versions of the model filtered by name\n filter_string = \"name = 'CordobaWeatherForecastModel'\"\n results = mlflow.search_model_versions(filter_string=filter_string)\n print(\"-\" * 80)\n for res in results:\n print(\"name={}; run_id={}; version={}\".format(res.name, res.run_id, res.version))\n\n # Get the version of the model filtered by run_id\n filter_string = \"run_id = 'ae9a606a12834c04a8ef1006d0cff779'\"\n results = mlflow.search_model_versions(filter_string=filter_string)\n print(\"-\" * 80)\n for res in results:\n print(\"name={}; run_id={}; version={}\".format(res.name, res.run_id, res.version))\n\n .. 
code-block:: text\n :caption: Output\n\n --------------------------------------------------------------------------------\n name=CordobaWeatherForecastModel; run_id=ae9a606a12834c04a8ef1006d0cff779; version=2\n name=CordobaWeatherForecastModel; run_id=d8f028b5fedf4faf8e458f7693dfa7ce; version=1\n --------------------------------------------------------------------------------\n name=CordobaWeatherForecastModel; run_id=ae9a606a12834c04a8ef1006d0cff779; version=2\n\n \"\"\"\n\n def pagination_wrapper_func(number_to_get, next_page_token):\n return MlflowClient().search_model_versions(\n max_results=number_to_get,\n filter_string=filter_string,\n order_by=order_by,\n page_token=next_page_token,\n )\n\n return get_results_from_paginated_fn(\n paginated_fn=pagination_wrapper_func,\n max_results_per_page=SEARCH_MODEL_VERSION_MAX_RESULTS_DEFAULT,\n max_results=max_results,\n )\n", "path": "mlflow/tracking/_model_registry/fluent.py"}]}
| 3,160 | 949 |
gh_patches_debug_43646
|
rasdani/github-patches
|
git_diff
|
feast-dev__feast-2610
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Keep labels in Field api
I found that new api 'Field' will take place of 'Feature' in 0.21+ feast. but `Field` only have 'name' and 'dtype' parameters. The parameter 'labels' is disappeared.
In my use case 'labels' is very import. 'labels' stores the default value, descriptions,and other things. for example
```python
comic_feature_view = FeatureView(
name="comic_featureV1",
entities=["item_id"],
ttl=Duration(seconds=86400 * 1),
features=[
Feature(name="channel_id", dtype=ValueType.INT32, labels={"default": "14", "desc":"channel"}),
Feature(name="keyword_weight", dtype=ValueType.FLOAT, labels={"default": "0.0", "desc":"keyword's weight"}),
Feature(name="comic_vectorv1", dtype=ValueType.FLOAT, labels={"default": ";".join(["0.0" for i in range(32)]), "desc":"deepwalk vector","faiss_index":"/data/faiss_index/comic_featureV1__comic_vectorv1.index"}),
Feature(name="comic_vectorv2", dtype=ValueType.FLOAT, labels={"default": ";".join(["0.0" for i in range(32)]), "desc":"word2vec vector","faiss_index":"/data/faiss_index/comic_featureV1__comic_vectorv2.index"}),
Feature(name="gender", dtype=ValueType.INT32, labels={"default": "0", "desc":" 0-femal 1-male"}),
Feature(name="pub_time", dtype=ValueType.STRING, labels={"default": "1970-01-01 00:00:00", "desc":"comic's publish time"}),
Feature(name="update_time", dtype=ValueType.STRING, labels={"default": "1970-01-01 00:00:00", "desc":"comic's update time"}),
Feature(name="view_cnt", dtype=ValueType.INT64, labels={"default": "0", "desc":"comic's hot score"}),
Feature(name="collect_cnt", dtype=ValueType.INT64, labels={"default": "0", "desc":"collect count"}),
Feature(name="source_id", dtype=ValueType.INT32, labels={"default": "0", "desc":"comic is from(0-unknown,1-japen,2-usa,3- other)"}),
```
So please keep the parameter 'labels' in Field api
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sdk/python/feast/field.py`
Content:
```
1 # Copyright 2022 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from feast.feature import Feature
16 from feast.protos.feast.core.Feature_pb2 import FeatureSpecV2 as FieldProto
17 from feast.types import FeastType, from_value_type
18 from feast.value_type import ValueType
19
20
21 class Field:
22 """
23 A Field represents a set of values with the same structure.
24
25 Attributes:
26 name: The name of the field.
27 dtype: The type of the field, such as string or float.
28 """
29
30 name: str
31 dtype: FeastType
32
33 def __init__(
34 self, *, name: str, dtype: FeastType,
35 ):
36 """
37 Creates a Field object.
38
39 Args:
40 name: The name of the field.
41 dtype: The type of the field, such as string or float.
42 """
43 self.name = name
44 self.dtype = dtype
45
46 def __eq__(self, other):
47 if self.name != other.name or self.dtype != other.dtype:
48 return False
49 return True
50
51 def __hash__(self):
52 return hash((self.name, hash(self.dtype)))
53
54 def __lt__(self, other):
55 return self.name < other.name
56
57 def __repr__(self):
58 return f"{self.name}-{self.dtype}"
59
60 def __str__(self):
61 return f"Field(name={self.name}, dtype={self.dtype})"
62
63 def to_proto(self) -> FieldProto:
64 """Converts a Field object to its protobuf representation."""
65 value_type = self.dtype.to_value_type()
66 return FieldProto(name=self.name, value_type=value_type.value)
67
68 @classmethod
69 def from_proto(cls, field_proto: FieldProto):
70 """
71 Creates a Field object from a protobuf representation.
72
73 Args:
74 field_proto: FieldProto protobuf object
75 """
76 value_type = ValueType(field_proto.value_type)
77 return cls(name=field_proto.name, dtype=from_value_type(value_type=value_type))
78
79 @classmethod
80 def from_feature(cls, feature: Feature):
81 """
82 Creates a Field object from a Feature object.
83
84 Args:
85 feature: Feature object to convert.
86 """
87 return cls(name=feature.name, dtype=from_value_type(feature.dtype))
88
```
Path: `sdk/python/feast/feature.py`
Content:
```
1 # Copyright 2020 The Feast Authors
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 from typing import Dict, Optional
16
17 from feast.protos.feast.core.Feature_pb2 import FeatureSpecV2 as FeatureSpecProto
18 from feast.protos.feast.types.Value_pb2 import ValueType as ValueTypeProto
19 from feast.value_type import ValueType
20
21
22 class Feature:
23 """
24 A Feature represents a class of serveable feature.
25
26 Args:
27 name: Name of the feature.
28 dtype: The type of the feature, such as string or float.
29 labels (optional): User-defined metadata in dictionary form.
30 """
31
32 def __init__(
33 self, name: str, dtype: ValueType, labels: Optional[Dict[str, str]] = None,
34 ):
35 """Creates a Feature object."""
36 self._name = name
37 if not isinstance(dtype, ValueType):
38 raise ValueError("dtype is not a valid ValueType")
39 if dtype is ValueType.UNKNOWN:
40 raise ValueError(f"dtype cannot be {dtype}")
41 self._dtype = dtype
42 if labels is None:
43 self._labels = dict()
44 else:
45 self._labels = labels
46
47 def __eq__(self, other):
48 if self.name != other.name or self.dtype != other.dtype:
49 return False
50 return True
51
52 def __lt__(self, other):
53 return self.name < other.name
54
55 def __repr__(self):
56 # return string representation of the reference
57 return f"{self.name}-{self.dtype}"
58
59 def __str__(self):
60 # readable string of the reference
61 return f"Feature<{self.__repr__()}>"
62
63 @property
64 def name(self):
65 """
66 Gets the name of this feature.
67 """
68 return self._name
69
70 @property
71 def dtype(self) -> ValueType:
72 """
73 Gets the data type of this feature.
74 """
75 return self._dtype
76
77 @property
78 def labels(self) -> Dict[str, str]:
79 """
80 Gets the labels of this feature.
81 """
82 return self._labels
83
84 def to_proto(self) -> FeatureSpecProto:
85 """
86 Converts Feature object to its Protocol Buffer representation.
87
88 Returns:
89 A FeatureSpecProto protobuf.
90 """
91 value_type = ValueTypeProto.Enum.Value(self.dtype.name)
92
93 return FeatureSpecProto(
94 name=self.name, value_type=value_type, labels=self.labels,
95 )
96
97 @classmethod
98 def from_proto(cls, feature_proto: FeatureSpecProto):
99 """
100 Args:
101 feature_proto: FeatureSpecV2 protobuf object
102
103 Returns:
104 Feature object
105 """
106 feature = cls(
107 name=feature_proto.name,
108 dtype=ValueType(feature_proto.value_type),
109 labels=dict(feature_proto.labels),
110 )
111
112 return feature
113
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sdk/python/feast/feature.py b/sdk/python/feast/feature.py
--- a/sdk/python/feast/feature.py
+++ b/sdk/python/feast/feature.py
@@ -91,7 +91,7 @@
value_type = ValueTypeProto.Enum.Value(self.dtype.name)
return FeatureSpecProto(
- name=self.name, value_type=value_type, labels=self.labels,
+ name=self.name, value_type=value_type, tags=self.labels,
)
@classmethod
@@ -106,7 +106,7 @@
feature = cls(
name=feature_proto.name,
dtype=ValueType(feature_proto.value_type),
- labels=dict(feature_proto.labels),
+ labels=dict(feature_proto.tags),
)
return feature
diff --git a/sdk/python/feast/field.py b/sdk/python/feast/field.py
--- a/sdk/python/feast/field.py
+++ b/sdk/python/feast/field.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+from typing import Dict, Optional
+
from feast.feature import Feature
from feast.protos.feast.core.Feature_pb2 import FeatureSpecV2 as FieldProto
from feast.types import FeastType, from_value_type
@@ -25,13 +27,15 @@
Attributes:
name: The name of the field.
dtype: The type of the field, such as string or float.
+ tags: User-defined metadata in dictionary form.
"""
name: str
dtype: FeastType
+ tags: Dict[str, str]
def __init__(
- self, *, name: str, dtype: FeastType,
+ self, *, name: str, dtype: FeastType, tags: Optional[Dict[str, str]] = None,
):
"""
Creates a Field object.
@@ -39,12 +43,18 @@
Args:
name: The name of the field.
dtype: The type of the field, such as string or float.
+ tags (optional): User-defined metadata in dictionary form.
"""
self.name = name
self.dtype = dtype
+ self.tags = tags or {}
def __eq__(self, other):
- if self.name != other.name or self.dtype != other.dtype:
+ if (
+ self.name != other.name
+ or self.dtype != other.dtype
+ or self.tags != other.tags
+ ):
return False
return True
@@ -58,12 +68,12 @@
return f"{self.name}-{self.dtype}"
def __str__(self):
- return f"Field(name={self.name}, dtype={self.dtype})"
+ return f"Field(name={self.name}, dtype={self.dtype}, tags={self.tags})"
def to_proto(self) -> FieldProto:
"""Converts a Field object to its protobuf representation."""
value_type = self.dtype.to_value_type()
- return FieldProto(name=self.name, value_type=value_type.value)
+ return FieldProto(name=self.name, value_type=value_type.value, tags=self.tags)
@classmethod
def from_proto(cls, field_proto: FieldProto):
@@ -74,7 +84,11 @@
field_proto: FieldProto protobuf object
"""
value_type = ValueType(field_proto.value_type)
- return cls(name=field_proto.name, dtype=from_value_type(value_type=value_type))
+ return cls(
+ name=field_proto.name,
+ dtype=from_value_type(value_type=value_type),
+ tags=dict(field_proto.tags),
+ )
@classmethod
def from_feature(cls, feature: Feature):
@@ -84,4 +98,6 @@
Args:
feature: Feature object to convert.
"""
- return cls(name=feature.name, dtype=from_value_type(feature.dtype))
+ return cls(
+ name=feature.name, dtype=from_value_type(feature.dtype), tags=feature.labels
+ )
|
{"golden_diff": "diff --git a/sdk/python/feast/feature.py b/sdk/python/feast/feature.py\n--- a/sdk/python/feast/feature.py\n+++ b/sdk/python/feast/feature.py\n@@ -91,7 +91,7 @@\n value_type = ValueTypeProto.Enum.Value(self.dtype.name)\n \n return FeatureSpecProto(\n- name=self.name, value_type=value_type, labels=self.labels,\n+ name=self.name, value_type=value_type, tags=self.labels,\n )\n \n @classmethod\n@@ -106,7 +106,7 @@\n feature = cls(\n name=feature_proto.name,\n dtype=ValueType(feature_proto.value_type),\n- labels=dict(feature_proto.labels),\n+ labels=dict(feature_proto.tags),\n )\n \n return feature\ndiff --git a/sdk/python/feast/field.py b/sdk/python/feast/field.py\n--- a/sdk/python/feast/field.py\n+++ b/sdk/python/feast/field.py\n@@ -12,6 +12,8 @@\n # See the License for the specific language governing permissions and\n # limitations under the License.\n \n+from typing import Dict, Optional\n+\n from feast.feature import Feature\n from feast.protos.feast.core.Feature_pb2 import FeatureSpecV2 as FieldProto\n from feast.types import FeastType, from_value_type\n@@ -25,13 +27,15 @@\n Attributes:\n name: The name of the field.\n dtype: The type of the field, such as string or float.\n+ tags: User-defined metadata in dictionary form.\n \"\"\"\n \n name: str\n dtype: FeastType\n+ tags: Dict[str, str]\n \n def __init__(\n- self, *, name: str, dtype: FeastType,\n+ self, *, name: str, dtype: FeastType, tags: Optional[Dict[str, str]] = None,\n ):\n \"\"\"\n Creates a Field object.\n@@ -39,12 +43,18 @@\n Args:\n name: The name of the field.\n dtype: The type of the field, such as string or float.\n+ tags (optional): User-defined metadata in dictionary form.\n \"\"\"\n self.name = name\n self.dtype = dtype\n+ self.tags = tags or {}\n \n def __eq__(self, other):\n- if self.name != other.name or self.dtype != other.dtype:\n+ if (\n+ self.name != other.name\n+ or self.dtype != other.dtype\n+ or self.tags != other.tags\n+ ):\n return False\n return True\n \n@@ -58,12 +68,12 @@\n return f\"{self.name}-{self.dtype}\"\n \n def __str__(self):\n- return f\"Field(name={self.name}, dtype={self.dtype})\"\n+ return f\"Field(name={self.name}, dtype={self.dtype}, tags={self.tags})\"\n \n def to_proto(self) -> FieldProto:\n \"\"\"Converts a Field object to its protobuf representation.\"\"\"\n value_type = self.dtype.to_value_type()\n- return FieldProto(name=self.name, value_type=value_type.value)\n+ return FieldProto(name=self.name, value_type=value_type.value, tags=self.tags)\n \n @classmethod\n def from_proto(cls, field_proto: FieldProto):\n@@ -74,7 +84,11 @@\n field_proto: FieldProto protobuf object\n \"\"\"\n value_type = ValueType(field_proto.value_type)\n- return cls(name=field_proto.name, dtype=from_value_type(value_type=value_type))\n+ return cls(\n+ name=field_proto.name,\n+ dtype=from_value_type(value_type=value_type),\n+ tags=dict(field_proto.tags),\n+ )\n \n @classmethod\n def from_feature(cls, feature: Feature):\n@@ -84,4 +98,6 @@\n Args:\n feature: Feature object to convert.\n \"\"\"\n- return cls(name=feature.name, dtype=from_value_type(feature.dtype))\n+ return cls(\n+ name=feature.name, dtype=from_value_type(feature.dtype), tags=feature.labels\n+ )\n", "issue": "Keep labels in Field api\nI found that new api 'Field' will take place of 'Feature' in 0.21+ feast. but `Field` only have 'name' and 'dtype' parameters. The parameter 'labels' is disappeared. \r\nIn my use case 'labels' is very import. 'labels' stores the default value, descriptions,and other things. 
for example\r\n\r\n```python\r\ncomic_feature_view = FeatureView(\r\n name=\"comic_featureV1\",\r\n entities=[\"item_id\"],\r\n ttl=Duration(seconds=86400 * 1),\r\n features=[\r\n Feature(name=\"channel_id\", dtype=ValueType.INT32, labels={\"default\": \"14\", \"desc\":\"channel\"}),\r\n Feature(name=\"keyword_weight\", dtype=ValueType.FLOAT, labels={\"default\": \"0.0\", \"desc\":\"keyword's weight\"}),\r\n Feature(name=\"comic_vectorv1\", dtype=ValueType.FLOAT, labels={\"default\": \";\".join([\"0.0\" for i in range(32)]), \"desc\":\"deepwalk vector\",\"faiss_index\":\"/data/faiss_index/comic_featureV1__comic_vectorv1.index\"}),\r\n Feature(name=\"comic_vectorv2\", dtype=ValueType.FLOAT, labels={\"default\": \";\".join([\"0.0\" for i in range(32)]), \"desc\":\"word2vec vector\",\"faiss_index\":\"/data/faiss_index/comic_featureV1__comic_vectorv2.index\"}),\r\n Feature(name=\"gender\", dtype=ValueType.INT32, labels={\"default\": \"0\", \"desc\":\" 0-femal 1-male\"}),\r\n Feature(name=\"pub_time\", dtype=ValueType.STRING, labels={\"default\": \"1970-01-01 00:00:00\", \"desc\":\"comic's publish time\"}),\r\n Feature(name=\"update_time\", dtype=ValueType.STRING, labels={\"default\": \"1970-01-01 00:00:00\", \"desc\":\"comic's update time\"}),\r\n Feature(name=\"view_cnt\", dtype=ValueType.INT64, labels={\"default\": \"0\", \"desc\":\"comic's hot score\"}),\r\n Feature(name=\"collect_cnt\", dtype=ValueType.INT64, labels={\"default\": \"0\", \"desc\":\"collect count\"}),\r\n Feature(name=\"source_id\", dtype=ValueType.INT32, labels={\"default\": \"0\", \"desc\":\"comic is from(0-unknown\uff0c1-japen\uff0c2-usa\uff0c3- other)\"}),\r\n```\r\n\r\nSo please keep the parameter 'labels' in Field api\r\n\n", "before_files": [{"content": "# Copyright 2022 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom feast.feature import Feature\nfrom feast.protos.feast.core.Feature_pb2 import FeatureSpecV2 as FieldProto\nfrom feast.types import FeastType, from_value_type\nfrom feast.value_type import ValueType\n\n\nclass Field:\n \"\"\"\n A Field represents a set of values with the same structure.\n\n Attributes:\n name: The name of the field.\n dtype: The type of the field, such as string or float.\n \"\"\"\n\n name: str\n dtype: FeastType\n\n def __init__(\n self, *, name: str, dtype: FeastType,\n ):\n \"\"\"\n Creates a Field object.\n\n Args:\n name: The name of the field.\n dtype: The type of the field, such as string or float.\n \"\"\"\n self.name = name\n self.dtype = dtype\n\n def __eq__(self, other):\n if self.name != other.name or self.dtype != other.dtype:\n return False\n return True\n\n def __hash__(self):\n return hash((self.name, hash(self.dtype)))\n\n def __lt__(self, other):\n return self.name < other.name\n\n def __repr__(self):\n return f\"{self.name}-{self.dtype}\"\n\n def __str__(self):\n return f\"Field(name={self.name}, dtype={self.dtype})\"\n\n def to_proto(self) -> FieldProto:\n \"\"\"Converts a Field object to its protobuf representation.\"\"\"\n value_type = 
self.dtype.to_value_type()\n return FieldProto(name=self.name, value_type=value_type.value)\n\n @classmethod\n def from_proto(cls, field_proto: FieldProto):\n \"\"\"\n Creates a Field object from a protobuf representation.\n\n Args:\n field_proto: FieldProto protobuf object\n \"\"\"\n value_type = ValueType(field_proto.value_type)\n return cls(name=field_proto.name, dtype=from_value_type(value_type=value_type))\n\n @classmethod\n def from_feature(cls, feature: Feature):\n \"\"\"\n Creates a Field object from a Feature object.\n\n Args:\n feature: Feature object to convert.\n \"\"\"\n return cls(name=feature.name, dtype=from_value_type(feature.dtype))\n", "path": "sdk/python/feast/field.py"}, {"content": "# Copyright 2020 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, Optional\n\nfrom feast.protos.feast.core.Feature_pb2 import FeatureSpecV2 as FeatureSpecProto\nfrom feast.protos.feast.types.Value_pb2 import ValueType as ValueTypeProto\nfrom feast.value_type import ValueType\n\n\nclass Feature:\n \"\"\"\n A Feature represents a class of serveable feature.\n\n Args:\n name: Name of the feature.\n dtype: The type of the feature, such as string or float.\n labels (optional): User-defined metadata in dictionary form.\n \"\"\"\n\n def __init__(\n self, name: str, dtype: ValueType, labels: Optional[Dict[str, str]] = None,\n ):\n \"\"\"Creates a Feature object.\"\"\"\n self._name = name\n if not isinstance(dtype, ValueType):\n raise ValueError(\"dtype is not a valid ValueType\")\n if dtype is ValueType.UNKNOWN:\n raise ValueError(f\"dtype cannot be {dtype}\")\n self._dtype = dtype\n if labels is None:\n self._labels = dict()\n else:\n self._labels = labels\n\n def __eq__(self, other):\n if self.name != other.name or self.dtype != other.dtype:\n return False\n return True\n\n def __lt__(self, other):\n return self.name < other.name\n\n def __repr__(self):\n # return string representation of the reference\n return f\"{self.name}-{self.dtype}\"\n\n def __str__(self):\n # readable string of the reference\n return f\"Feature<{self.__repr__()}>\"\n\n @property\n def name(self):\n \"\"\"\n Gets the name of this feature.\n \"\"\"\n return self._name\n\n @property\n def dtype(self) -> ValueType:\n \"\"\"\n Gets the data type of this feature.\n \"\"\"\n return self._dtype\n\n @property\n def labels(self) -> Dict[str, str]:\n \"\"\"\n Gets the labels of this feature.\n \"\"\"\n return self._labels\n\n def to_proto(self) -> FeatureSpecProto:\n \"\"\"\n Converts Feature object to its Protocol Buffer representation.\n\n Returns:\n A FeatureSpecProto protobuf.\n \"\"\"\n value_type = ValueTypeProto.Enum.Value(self.dtype.name)\n\n return FeatureSpecProto(\n name=self.name, value_type=value_type, labels=self.labels,\n )\n\n @classmethod\n def from_proto(cls, feature_proto: FeatureSpecProto):\n \"\"\"\n Args:\n feature_proto: FeatureSpecV2 protobuf object\n\n Returns:\n Feature object\n \"\"\"\n feature = cls(\n name=feature_proto.name,\n 
dtype=ValueType(feature_proto.value_type),\n labels=dict(feature_proto.labels),\n )\n\n return feature\n", "path": "sdk/python/feast/feature.py"}], "after_files": [{"content": "# Copyright 2022 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, Optional\n\nfrom feast.feature import Feature\nfrom feast.protos.feast.core.Feature_pb2 import FeatureSpecV2 as FieldProto\nfrom feast.types import FeastType, from_value_type\nfrom feast.value_type import ValueType\n\n\nclass Field:\n \"\"\"\n A Field represents a set of values with the same structure.\n\n Attributes:\n name: The name of the field.\n dtype: The type of the field, such as string or float.\n tags: User-defined metadata in dictionary form.\n \"\"\"\n\n name: str\n dtype: FeastType\n tags: Dict[str, str]\n\n def __init__(\n self, *, name: str, dtype: FeastType, tags: Optional[Dict[str, str]] = None,\n ):\n \"\"\"\n Creates a Field object.\n\n Args:\n name: The name of the field.\n dtype: The type of the field, such as string or float.\n tags (optional): User-defined metadata in dictionary form.\n \"\"\"\n self.name = name\n self.dtype = dtype\n self.tags = tags or {}\n\n def __eq__(self, other):\n if (\n self.name != other.name\n or self.dtype != other.dtype\n or self.tags != other.tags\n ):\n return False\n return True\n\n def __hash__(self):\n return hash((self.name, hash(self.dtype)))\n\n def __lt__(self, other):\n return self.name < other.name\n\n def __repr__(self):\n return f\"{self.name}-{self.dtype}\"\n\n def __str__(self):\n return f\"Field(name={self.name}, dtype={self.dtype}, tags={self.tags})\"\n\n def to_proto(self) -> FieldProto:\n \"\"\"Converts a Field object to its protobuf representation.\"\"\"\n value_type = self.dtype.to_value_type()\n return FieldProto(name=self.name, value_type=value_type.value, tags=self.tags)\n\n @classmethod\n def from_proto(cls, field_proto: FieldProto):\n \"\"\"\n Creates a Field object from a protobuf representation.\n\n Args:\n field_proto: FieldProto protobuf object\n \"\"\"\n value_type = ValueType(field_proto.value_type)\n return cls(\n name=field_proto.name,\n dtype=from_value_type(value_type=value_type),\n tags=dict(field_proto.tags),\n )\n\n @classmethod\n def from_feature(cls, feature: Feature):\n \"\"\"\n Creates a Field object from a Feature object.\n\n Args:\n feature: Feature object to convert.\n \"\"\"\n return cls(\n name=feature.name, dtype=from_value_type(feature.dtype), tags=feature.labels\n )\n", "path": "sdk/python/feast/field.py"}, {"content": "# Copyright 2020 The Feast Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# 
See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom typing import Dict, Optional\n\nfrom feast.protos.feast.core.Feature_pb2 import FeatureSpecV2 as FeatureSpecProto\nfrom feast.protos.feast.types.Value_pb2 import ValueType as ValueTypeProto\nfrom feast.value_type import ValueType\n\n\nclass Feature:\n \"\"\"\n A Feature represents a class of serveable feature.\n\n Args:\n name: Name of the feature.\n dtype: The type of the feature, such as string or float.\n labels (optional): User-defined metadata in dictionary form.\n \"\"\"\n\n def __init__(\n self, name: str, dtype: ValueType, labels: Optional[Dict[str, str]] = None,\n ):\n \"\"\"Creates a Feature object.\"\"\"\n self._name = name\n if not isinstance(dtype, ValueType):\n raise ValueError(\"dtype is not a valid ValueType\")\n if dtype is ValueType.UNKNOWN:\n raise ValueError(f\"dtype cannot be {dtype}\")\n self._dtype = dtype\n if labels is None:\n self._labels = dict()\n else:\n self._labels = labels\n\n def __eq__(self, other):\n if self.name != other.name or self.dtype != other.dtype:\n return False\n return True\n\n def __lt__(self, other):\n return self.name < other.name\n\n def __repr__(self):\n # return string representation of the reference\n return f\"{self.name}-{self.dtype}\"\n\n def __str__(self):\n # readable string of the reference\n return f\"Feature<{self.__repr__()}>\"\n\n @property\n def name(self):\n \"\"\"\n Gets the name of this feature.\n \"\"\"\n return self._name\n\n @property\n def dtype(self) -> ValueType:\n \"\"\"\n Gets the data type of this feature.\n \"\"\"\n return self._dtype\n\n @property\n def labels(self) -> Dict[str, str]:\n \"\"\"\n Gets the labels of this feature.\n \"\"\"\n return self._labels\n\n def to_proto(self) -> FeatureSpecProto:\n \"\"\"\n Converts Feature object to its Protocol Buffer representation.\n\n Returns:\n A FeatureSpecProto protobuf.\n \"\"\"\n value_type = ValueTypeProto.Enum.Value(self.dtype.name)\n\n return FeatureSpecProto(\n name=self.name, value_type=value_type, tags=self.labels,\n )\n\n @classmethod\n def from_proto(cls, feature_proto: FeatureSpecProto):\n \"\"\"\n Args:\n feature_proto: FeatureSpecV2 protobuf object\n\n Returns:\n Feature object\n \"\"\"\n feature = cls(\n name=feature_proto.name,\n dtype=ValueType(feature_proto.value_type),\n labels=dict(feature_proto.tags),\n )\n\n return feature\n", "path": "sdk/python/feast/feature.py"}]}
| 2,533 | 891 |
gh_patches_debug_35104
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-912
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
"git add -N" prevents restoring stashed changes.
To reproduce, start with this simple `pre-commit-config.yaml` in an otherwise empty repo:
```yaml
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: master
hooks:
- id: end-of-file-fixer
```
The hook used doesn't really matter. `end-of-file-fixer` is just an example.
Run the following:
```bash
echo "new" > newfile
echo "\n\n\n" > needs-fixing
git add -N newfile
# newfile is now staged as an empty file
git add needs-fixing
git commit -m "fix"
```
The following output is generated:
```
[WARNING] Unstaged files detected.
[INFO] Stashing unstaged files to /home/henniss/.cache/pre-commit/patch1544663784.
Fix End of Files.........................................................Failed
hookid: end-of-file-fixer
Files were modified by this hook. Additional output:
Fixing needs-fixing
[WARNING] Stashed changes conflicted with hook auto-fixes... Rolling back fixes...
An unexpected error has occurred: CalledProcessError: Command: ('/usr/lib/git-core/git', '-c', 'core.autocrlf=false', 'apply', '--whitespace=nowarn', '/home/henniss/.cache/pre-commit/patch1544663784')
Return code: 1
Expected return code: 0
Output: (none)
Errors:
error: newfile: already exists in working directory
Check the log at /home/henniss/.cache/pre-commit/pre-commit.log
```
`cat newfile` now shows that it is empty. The unstaged changes aren't restored.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/git.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import logging
4 import os.path
5 import sys
6
7 from pre_commit.util import cmd_output
8
9
10 logger = logging.getLogger(__name__)
11
12
13 def zsplit(s):
14 s = s.strip('\0')
15 if s:
16 return s.split('\0')
17 else:
18 return []
19
20
21 def no_git_env():
22 # Too many bugs dealing with environment variables and GIT:
23 # https://github.com/pre-commit/pre-commit/issues/300
24 # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running
25 # pre-commit hooks
26 # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE
27 # while running pre-commit hooks in submodules.
28 # GIT_DIR: Causes git clone to clone wrong thing
29 # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit
30 return {
31 k: v for k, v in os.environ.items()
32 if not k.startswith('GIT_') or k in {'GIT_SSH'}
33 }
34
35
36 def get_root():
37 return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()
38
39
40 def get_git_dir(git_root='.'):
41 opts = ('--git-common-dir', '--git-dir')
42 _, out, _ = cmd_output('git', 'rev-parse', *opts, cwd=git_root)
43 for line, opt in zip(out.splitlines(), opts):
44 if line != opt: # pragma: no branch (git < 2.5)
45 return os.path.normpath(os.path.join(git_root, line))
46 else:
47 raise AssertionError('unreachable: no git dir')
48
49
50 def get_remote_url(git_root):
51 ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]
52 return ret.strip()
53
54
55 def is_in_merge_conflict():
56 git_dir = get_git_dir('.')
57 return (
58 os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and
59 os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))
60 )
61
62
63 def parse_merge_msg_for_conflicts(merge_msg):
64 # Conflicted files start with tabs
65 return [
66 line.lstrip(b'#').strip().decode('UTF-8')
67 for line in merge_msg.splitlines()
68 # '#\t' for git 2.4.1
69 if line.startswith((b'\t', b'#\t'))
70 ]
71
72
73 def get_conflicted_files():
74 logger.info('Checking merge-conflict files only.')
75 # Need to get the conflicted files from the MERGE_MSG because they could
76 # have resolved the conflict by choosing one side or the other
77 with open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb') as f:
78 merge_msg = f.read()
79 merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)
80
81 # This will get the rest of the changes made after the merge.
82 # If they resolved the merge conflict by choosing a mesh of both sides
83 # this will also include the conflicted files
84 tree_hash = cmd_output('git', 'write-tree')[1].strip()
85 merge_diff_filenames = zsplit(cmd_output(
86 'git', 'diff', '--name-only', '--no-ext-diff', '-z',
87 '-m', tree_hash, 'HEAD', 'MERGE_HEAD',
88 )[1])
89 return set(merge_conflict_filenames) | set(merge_diff_filenames)
90
91
92 def get_staged_files():
93 return zsplit(cmd_output(
94 'git', 'diff', '--staged', '--name-only', '--no-ext-diff', '-z',
95 # Everything except for D
96 '--diff-filter=ACMRTUXB',
97 )[1])
98
99
100 def get_all_files():
101 return zsplit(cmd_output('git', 'ls-files', '-z')[1])
102
103
104 def get_changed_files(new, old):
105 return zsplit(cmd_output(
106 'git', 'diff', '--name-only', '--no-ext-diff', '-z',
107 '{}...{}'.format(old, new),
108 )[1])
109
110
111 def head_rev(remote):
112 _, out, _ = cmd_output('git', 'ls-remote', '--exit-code', remote, 'HEAD')
113 return out.split()[0]
114
115
116 def has_diff(*args, **kwargs):
117 repo = kwargs.pop('repo', '.')
118 assert not kwargs, kwargs
119 cmd = ('git', 'diff', '--quiet', '--no-ext-diff') + args
120 return cmd_output(*cmd, cwd=repo, retcode=None)[0]
121
122
123 def commit(repo='.'):
124 env = no_git_env()
125 name, email = 'pre-commit', '[email protected]'
126 env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = name
127 env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = email
128 cmd = ('git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')
129 cmd_output(*cmd, cwd=repo, env=env)
130
131
132 def git_path(name, repo='.'):
133 _, out, _ = cmd_output('git', 'rev-parse', '--git-path', name, cwd=repo)
134 return os.path.join(repo, out.strip())
135
136
137 def check_for_cygwin_mismatch():
138 """See https://github.com/pre-commit/pre-commit/issues/354"""
139 if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)
140 is_cygwin_python = sys.platform == 'cygwin'
141 toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]
142 is_cygwin_git = toplevel.startswith('/')
143
144 if is_cygwin_python ^ is_cygwin_git:
145 exe_type = {True: '(cygwin)', False: '(windows)'}
146 logger.warn(
147 'pre-commit has detected a mix of cygwin python / git\n'
148 'This combination is not supported, it is likely you will '
149 'receive an error later in the program.\n'
150 'Make sure to use cygwin git+python while using cygwin\n'
151 'These can be installed through the cygwin installer.\n'
152 ' - python {}\n'
153 ' - git {}\n'.format(
154 exe_type[is_cygwin_python], exe_type[is_cygwin_git],
155 ),
156 )
157
```
Path: `pre_commit/staged_files_only.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import contextlib
4 import io
5 import logging
6 import os.path
7 import time
8
9 from pre_commit.util import CalledProcessError
10 from pre_commit.util import cmd_output
11 from pre_commit.util import mkdirp
12
13
14 logger = logging.getLogger('pre_commit')
15
16
17 def _git_apply(patch):
18 args = ('apply', '--whitespace=nowarn', patch)
19 try:
20 cmd_output('git', *args, encoding=None)
21 except CalledProcessError:
22 # Retry with autocrlf=false -- see #570
23 cmd_output('git', '-c', 'core.autocrlf=false', *args, encoding=None)
24
25
26 @contextlib.contextmanager
27 def staged_files_only(patch_dir):
28 """Clear any unstaged changes from the git working directory inside this
29 context.
30 """
31 # Determine if there are unstaged files
32 tree = cmd_output('git', 'write-tree')[1].strip()
33 retcode, diff_stdout_binary, _ = cmd_output(
34 'git', 'diff-index', '--ignore-submodules', '--binary',
35 '--exit-code', '--no-color', '--no-ext-diff', tree, '--',
36 retcode=None,
37 encoding=None,
38 )
39 if retcode and diff_stdout_binary.strip():
40 patch_filename = 'patch{}'.format(int(time.time()))
41 patch_filename = os.path.join(patch_dir, patch_filename)
42 logger.warning('Unstaged files detected.')
43 logger.info(
44 'Stashing unstaged files to {}.'.format(patch_filename),
45 )
46 # Save the current unstaged changes as a patch
47 mkdirp(patch_dir)
48 with io.open(patch_filename, 'wb') as patch_file:
49 patch_file.write(diff_stdout_binary)
50
51 # Clear the working directory of unstaged changes
52 cmd_output('git', 'checkout', '--', '.')
53 try:
54 yield
55 finally:
56 # Try to apply the patch we saved
57 try:
58 _git_apply(patch_filename)
59 except CalledProcessError:
60 logger.warning(
61 'Stashed changes conflicted with hook auto-fixes... '
62 'Rolling back fixes...',
63 )
64 # We failed to apply the patch, presumably due to fixes made
65 # by hooks.
66 # Roll back the changes made by hooks.
67 cmd_output('git', 'checkout', '--', '.')
68 _git_apply(patch_filename)
69 logger.info('Restored changes from {}.'.format(patch_filename))
70 else:
71 # There weren't any staged files so we don't need to do anything
72 # special
73 yield
74
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/git.py b/pre_commit/git.py
--- a/pre_commit/git.py
+++ b/pre_commit/git.py
@@ -97,6 +97,20 @@
)[1])
+def intent_to_add_files():
+ _, stdout_binary, _ = cmd_output('git', 'status', '--porcelain', '-z')
+ parts = list(reversed(zsplit(stdout_binary)))
+ intent_to_add = []
+ while parts:
+ line = parts.pop()
+ status, filename = line[:3], line[3:]
+ if status[0] in {'C', 'R'}: # renames / moves have an additional arg
+ parts.pop()
+ if status[1] == 'A':
+ intent_to_add.append(filename)
+ return intent_to_add
+
+
def get_all_files():
return zsplit(cmd_output('git', 'ls-files', '-z')[1])
diff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py
--- a/pre_commit/staged_files_only.py
+++ b/pre_commit/staged_files_only.py
@@ -6,9 +6,11 @@
import os.path
import time
+from pre_commit import git
from pre_commit.util import CalledProcessError
from pre_commit.util import cmd_output
from pre_commit.util import mkdirp
+from pre_commit.xargs import xargs
logger = logging.getLogger('pre_commit')
@@ -24,11 +26,22 @@
@contextlib.contextmanager
-def staged_files_only(patch_dir):
- """Clear any unstaged changes from the git working directory inside this
- context.
- """
- # Determine if there are unstaged files
+def _intent_to_add_cleared():
+ intent_to_add = git.intent_to_add_files()
+ if intent_to_add:
+ logger.warning('Unstaged intent-to-add files detected.')
+
+ xargs(('git', 'rm', '--cached', '--'), intent_to_add)
+ try:
+ yield
+ finally:
+ xargs(('git', 'add', '--intent-to-add', '--'), intent_to_add)
+ else:
+ yield
+
+
[email protected]
+def _unstaged_changes_cleared(patch_dir):
tree = cmd_output('git', 'write-tree')[1].strip()
retcode, diff_stdout_binary, _ = cmd_output(
'git', 'diff-index', '--ignore-submodules', '--binary',
@@ -71,3 +84,12 @@
# There weren't any staged files so we don't need to do anything
# special
yield
+
+
[email protected]
+def staged_files_only(patch_dir):
+ """Clear any unstaged changes from the git working directory inside this
+ context.
+ """
+ with _intent_to_add_cleared(), _unstaged_changes_cleared(patch_dir):
+ yield
|
{"golden_diff": "diff --git a/pre_commit/git.py b/pre_commit/git.py\n--- a/pre_commit/git.py\n+++ b/pre_commit/git.py\n@@ -97,6 +97,20 @@\n )[1])\n \n \n+def intent_to_add_files():\n+ _, stdout_binary, _ = cmd_output('git', 'status', '--porcelain', '-z')\n+ parts = list(reversed(zsplit(stdout_binary)))\n+ intent_to_add = []\n+ while parts:\n+ line = parts.pop()\n+ status, filename = line[:3], line[3:]\n+ if status[0] in {'C', 'R'}: # renames / moves have an additional arg\n+ parts.pop()\n+ if status[1] == 'A':\n+ intent_to_add.append(filename)\n+ return intent_to_add\n+\n+\n def get_all_files():\n return zsplit(cmd_output('git', 'ls-files', '-z')[1])\n \ndiff --git a/pre_commit/staged_files_only.py b/pre_commit/staged_files_only.py\n--- a/pre_commit/staged_files_only.py\n+++ b/pre_commit/staged_files_only.py\n@@ -6,9 +6,11 @@\n import os.path\n import time\n \n+from pre_commit import git\n from pre_commit.util import CalledProcessError\n from pre_commit.util import cmd_output\n from pre_commit.util import mkdirp\n+from pre_commit.xargs import xargs\n \n \n logger = logging.getLogger('pre_commit')\n@@ -24,11 +26,22 @@\n \n \n @contextlib.contextmanager\n-def staged_files_only(patch_dir):\n- \"\"\"Clear any unstaged changes from the git working directory inside this\n- context.\n- \"\"\"\n- # Determine if there are unstaged files\n+def _intent_to_add_cleared():\n+ intent_to_add = git.intent_to_add_files()\n+ if intent_to_add:\n+ logger.warning('Unstaged intent-to-add files detected.')\n+\n+ xargs(('git', 'rm', '--cached', '--'), intent_to_add)\n+ try:\n+ yield\n+ finally:\n+ xargs(('git', 'add', '--intent-to-add', '--'), intent_to_add)\n+ else:\n+ yield\n+\n+\[email protected]\n+def _unstaged_changes_cleared(patch_dir):\n tree = cmd_output('git', 'write-tree')[1].strip()\n retcode, diff_stdout_binary, _ = cmd_output(\n 'git', 'diff-index', '--ignore-submodules', '--binary',\n@@ -71,3 +84,12 @@\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n+\n+\[email protected]\n+def staged_files_only(patch_dir):\n+ \"\"\"Clear any unstaged changes from the git working directory inside this\n+ context.\n+ \"\"\"\n+ with _intent_to_add_cleared(), _unstaged_changes_cleared(patch_dir):\n+ yield\n", "issue": "\"git add -N\" prevents restoring stashed changes.\nTo reproduce, start with this simple `pre-commit-config.yaml` in an otherwise empty repo:\r\n\r\n```yaml\r\nrepos:\r\n- repo: https://github.com/pre-commit/pre-commit-hooks\r\n rev: master\r\n hooks: \r\n - id: end-of-file-fixer\r\n```\r\n\r\nThe hook used doesn't really matter. end-of-file-fixer is just an example. \r\n\r\nRun the following:\r\n```bash\r\necho \"new\" > newfile\r\necho \"\\n\\n\\n\" > needs-fixing\r\ngit add -N newfile\r\n# newfile is now staged as an empty file\r\ngit add needs-fixing\r\ngit commit -m \"fix\"\r\n```\r\n\r\nThe following output is generated: \r\n\r\n```\r\n[WARNING] Unstaged files detected.\r\n[INFO] Stashing unstaged files to /home/henniss/.cache/pre-commit/patch1544663784.\r\nFix End of Files.........................................................Failed\r\nhookid: end-of-file-fixer\r\n\r\nFiles were modified by this hook. Additional output:\r\n\r\nFixing needs-fixing\r\n\r\n[WARNING] Stashed changes conflicted with hook auto-fixes... 
Rolling back fixes...\r\nAn unexpected error has occurred: CalledProcessError: Command: ('/usr/lib/git-core/git', '-c', 'core.autocrlf=false', 'apply', '--whitespace=nowarn', '/home/henniss/.cache/pre-commit/patch1544663784')\r\nReturn code: 1\r\nExpected return code: 0\r\nOutput: (none)\r\nErrors:\r\n error: newfile: already exists in working directory\r\n\r\n\r\nCheck the log at /home/henniss/.cache/pre-commit/pre-commit.log\r\n```\r\n\r\n`cat newfile` now shows that it is empty. The unstaged changes aren't restored.\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport os.path\nimport sys\n\nfrom pre_commit.util import cmd_output\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef zsplit(s):\n s = s.strip('\\0')\n if s:\n return s.split('\\0')\n else:\n return []\n\n\ndef no_git_env():\n # Too many bugs dealing with environment variables and GIT:\n # https://github.com/pre-commit/pre-commit/issues/300\n # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running\n # pre-commit hooks\n # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE\n # while running pre-commit hooks in submodules.\n # GIT_DIR: Causes git clone to clone wrong thing\n # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit\n return {\n k: v for k, v in os.environ.items()\n if not k.startswith('GIT_') or k in {'GIT_SSH'}\n }\n\n\ndef get_root():\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n\n\ndef get_git_dir(git_root='.'):\n opts = ('--git-common-dir', '--git-dir')\n _, out, _ = cmd_output('git', 'rev-parse', *opts, cwd=git_root)\n for line, opt in zip(out.splitlines(), opts):\n if line != opt: # pragma: no branch (git < 2.5)\n return os.path.normpath(os.path.join(git_root, line))\n else:\n raise AssertionError('unreachable: no git dir')\n\n\ndef get_remote_url(git_root):\n ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]\n return ret.strip()\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip(b'#').strip().decode('UTF-8')\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith((b'\\t', b'#\\t'))\n ]\n\n\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n with open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb') as f:\n merge_msg = f.read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = zsplit(cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n '-m', tree_hash, 'HEAD', 'MERGE_HEAD',\n )[1])\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\ndef get_staged_files():\n return zsplit(cmd_output(\n 'git', 'diff', '--staged', '--name-only', '--no-ext-diff', '-z',\n # Everything except for D\n '--diff-filter=ACMRTUXB',\n )[1])\n\n\ndef get_all_files():\n return zsplit(cmd_output('git', 'ls-files', '-z')[1])\n\n\ndef 
get_changed_files(new, old):\n return zsplit(cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n '{}...{}'.format(old, new),\n )[1])\n\n\ndef head_rev(remote):\n _, out, _ = cmd_output('git', 'ls-remote', '--exit-code', remote, 'HEAD')\n return out.split()[0]\n\n\ndef has_diff(*args, **kwargs):\n repo = kwargs.pop('repo', '.')\n assert not kwargs, kwargs\n cmd = ('git', 'diff', '--quiet', '--no-ext-diff') + args\n return cmd_output(*cmd, cwd=repo, retcode=None)[0]\n\n\ndef commit(repo='.'):\n env = no_git_env()\n name, email = 'pre-commit', '[email protected]'\n env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = name\n env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = email\n cmd = ('git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')\n cmd_output(*cmd, cwd=repo, env=env)\n\n\ndef git_path(name, repo='.'):\n _, out, _ = cmd_output('git', 'rev-parse', '--git-path', name, cwd=repo)\n return os.path.join(repo, out.strip())\n\n\ndef check_for_cygwin_mismatch():\n \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n is_cygwin_python = sys.platform == 'cygwin'\n toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]\n is_cygwin_git = toplevel.startswith('/')\n\n if is_cygwin_python ^ is_cygwin_git:\n exe_type = {True: '(cygwin)', False: '(windows)'}\n logger.warn(\n 'pre-commit has detected a mix of cygwin python / git\\n'\n 'This combination is not supported, it is likely you will '\n 'receive an error later in the program.\\n'\n 'Make sure to use cygwin git+python while using cygwin\\n'\n 'These can be installed through the cygwin installer.\\n'\n ' - python {}\\n'\n ' - git {}\\n'.format(\n exe_type[is_cygwin_python], exe_type[is_cygwin_git],\n ),\n )\n", "path": "pre_commit/git.py"}, {"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport os.path\nimport time\n\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import mkdirp\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _git_apply(patch):\n args = ('apply', '--whitespace=nowarn', patch)\n try:\n cmd_output('git', *args, encoding=None)\n except CalledProcessError:\n # Retry with autocrlf=false -- see #570\n cmd_output('git', '-c', 'core.autocrlf=false', *args, encoding=None)\n\n\[email protected]\ndef staged_files_only(patch_dir):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n \"\"\"\n # Determine if there are unstaged files\n tree = cmd_output('git', 'write-tree')[1].strip()\n retcode, diff_stdout_binary, _ = cmd_output(\n 'git', 'diff-index', '--ignore-submodules', '--binary',\n '--exit-code', '--no-color', '--no-ext-diff', tree, '--',\n retcode=None,\n encoding=None,\n )\n if retcode and diff_stdout_binary.strip():\n patch_filename = 'patch{}'.format(int(time.time()))\n patch_filename = os.path.join(patch_dir, patch_filename)\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n mkdirp(patch_dir)\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # Clear the working directory of unstaged changes\n cmd_output('git', 'checkout', '--', '.')\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n _git_apply(patch_filename)\n except CalledProcessError:\n logger.warning(\n 
'Stashed changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...',\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_output('git', 'checkout', '--', '.')\n _git_apply(patch_filename)\n logger.info('Restored changes from {}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n", "path": "pre_commit/staged_files_only.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport logging\nimport os.path\nimport sys\n\nfrom pre_commit.util import cmd_output\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef zsplit(s):\n s = s.strip('\\0')\n if s:\n return s.split('\\0')\n else:\n return []\n\n\ndef no_git_env():\n # Too many bugs dealing with environment variables and GIT:\n # https://github.com/pre-commit/pre-commit/issues/300\n # In git 2.6.3 (maybe others), git exports GIT_WORK_TREE while running\n # pre-commit hooks\n # In git 1.9.1 (maybe others), git exports GIT_DIR and GIT_INDEX_FILE\n # while running pre-commit hooks in submodules.\n # GIT_DIR: Causes git clone to clone wrong thing\n # GIT_INDEX_FILE: Causes 'error invalid object ...' during commit\n return {\n k: v for k, v in os.environ.items()\n if not k.startswith('GIT_') or k in {'GIT_SSH'}\n }\n\n\ndef get_root():\n return cmd_output('git', 'rev-parse', '--show-toplevel')[1].strip()\n\n\ndef get_git_dir(git_root='.'):\n opts = ('--git-common-dir', '--git-dir')\n _, out, _ = cmd_output('git', 'rev-parse', *opts, cwd=git_root)\n for line, opt in zip(out.splitlines(), opts):\n if line != opt: # pragma: no branch (git < 2.5)\n return os.path.normpath(os.path.join(git_root, line))\n else:\n raise AssertionError('unreachable: no git dir')\n\n\ndef get_remote_url(git_root):\n ret = cmd_output('git', 'config', 'remote.origin.url', cwd=git_root)[1]\n return ret.strip()\n\n\ndef is_in_merge_conflict():\n git_dir = get_git_dir('.')\n return (\n os.path.exists(os.path.join(git_dir, 'MERGE_MSG')) and\n os.path.exists(os.path.join(git_dir, 'MERGE_HEAD'))\n )\n\n\ndef parse_merge_msg_for_conflicts(merge_msg):\n # Conflicted files start with tabs\n return [\n line.lstrip(b'#').strip().decode('UTF-8')\n for line in merge_msg.splitlines()\n # '#\\t' for git 2.4.1\n if line.startswith((b'\\t', b'#\\t'))\n ]\n\n\ndef get_conflicted_files():\n logger.info('Checking merge-conflict files only.')\n # Need to get the conflicted files from the MERGE_MSG because they could\n # have resolved the conflict by choosing one side or the other\n with open(os.path.join(get_git_dir('.'), 'MERGE_MSG'), 'rb') as f:\n merge_msg = f.read()\n merge_conflict_filenames = parse_merge_msg_for_conflicts(merge_msg)\n\n # This will get the rest of the changes made after the merge.\n # If they resolved the merge conflict by choosing a mesh of both sides\n # this will also include the conflicted files\n tree_hash = cmd_output('git', 'write-tree')[1].strip()\n merge_diff_filenames = zsplit(cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n '-m', tree_hash, 'HEAD', 'MERGE_HEAD',\n )[1])\n return set(merge_conflict_filenames) | set(merge_diff_filenames)\n\n\ndef get_staged_files():\n return zsplit(cmd_output(\n 'git', 'diff', '--staged', '--name-only', '--no-ext-diff', '-z',\n # Everything except for D\n '--diff-filter=ACMRTUXB',\n )[1])\n\n\ndef intent_to_add_files():\n _, stdout_binary, _ = cmd_output('git', 'status', '--porcelain', '-z')\n parts = 
list(reversed(zsplit(stdout_binary)))\n intent_to_add = []\n while parts:\n line = parts.pop()\n status, filename = line[:3], line[3:]\n if status[0] in {'C', 'R'}: # renames / moves have an additional arg\n parts.pop()\n if status[1] == 'A':\n intent_to_add.append(filename)\n return intent_to_add\n\n\ndef get_all_files():\n return zsplit(cmd_output('git', 'ls-files', '-z')[1])\n\n\ndef get_changed_files(new, old):\n return zsplit(cmd_output(\n 'git', 'diff', '--name-only', '--no-ext-diff', '-z',\n '{}...{}'.format(old, new),\n )[1])\n\n\ndef head_rev(remote):\n _, out, _ = cmd_output('git', 'ls-remote', '--exit-code', remote, 'HEAD')\n return out.split()[0]\n\n\ndef has_diff(*args, **kwargs):\n repo = kwargs.pop('repo', '.')\n assert not kwargs, kwargs\n cmd = ('git', 'diff', '--quiet', '--no-ext-diff') + args\n return cmd_output(*cmd, cwd=repo, retcode=None)[0]\n\n\ndef commit(repo='.'):\n env = no_git_env()\n name, email = 'pre-commit', '[email protected]'\n env['GIT_AUTHOR_NAME'] = env['GIT_COMMITTER_NAME'] = name\n env['GIT_AUTHOR_EMAIL'] = env['GIT_COMMITTER_EMAIL'] = email\n cmd = ('git', 'commit', '--no-edit', '--no-gpg-sign', '-n', '-minit')\n cmd_output(*cmd, cwd=repo, env=env)\n\n\ndef git_path(name, repo='.'):\n _, out, _ = cmd_output('git', 'rev-parse', '--git-path', name, cwd=repo)\n return os.path.join(repo, out.strip())\n\n\ndef check_for_cygwin_mismatch():\n \"\"\"See https://github.com/pre-commit/pre-commit/issues/354\"\"\"\n if sys.platform in ('cygwin', 'win32'): # pragma: no cover (windows)\n is_cygwin_python = sys.platform == 'cygwin'\n toplevel = cmd_output('git', 'rev-parse', '--show-toplevel')[1]\n is_cygwin_git = toplevel.startswith('/')\n\n if is_cygwin_python ^ is_cygwin_git:\n exe_type = {True: '(cygwin)', False: '(windows)'}\n logger.warn(\n 'pre-commit has detected a mix of cygwin python / git\\n'\n 'This combination is not supported, it is likely you will '\n 'receive an error later in the program.\\n'\n 'Make sure to use cygwin git+python while using cygwin\\n'\n 'These can be installed through the cygwin installer.\\n'\n ' - python {}\\n'\n ' - git {}\\n'.format(\n exe_type[is_cygwin_python], exe_type[is_cygwin_git],\n ),\n )\n", "path": "pre_commit/git.py"}, {"content": "from __future__ import unicode_literals\n\nimport contextlib\nimport io\nimport logging\nimport os.path\nimport time\n\nfrom pre_commit import git\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import mkdirp\nfrom pre_commit.xargs import xargs\n\n\nlogger = logging.getLogger('pre_commit')\n\n\ndef _git_apply(patch):\n args = ('apply', '--whitespace=nowarn', patch)\n try:\n cmd_output('git', *args, encoding=None)\n except CalledProcessError:\n # Retry with autocrlf=false -- see #570\n cmd_output('git', '-c', 'core.autocrlf=false', *args, encoding=None)\n\n\[email protected]\ndef _intent_to_add_cleared():\n intent_to_add = git.intent_to_add_files()\n if intent_to_add:\n logger.warning('Unstaged intent-to-add files detected.')\n\n xargs(('git', 'rm', '--cached', '--'), intent_to_add)\n try:\n yield\n finally:\n xargs(('git', 'add', '--intent-to-add', '--'), intent_to_add)\n else:\n yield\n\n\[email protected]\ndef _unstaged_changes_cleared(patch_dir):\n tree = cmd_output('git', 'write-tree')[1].strip()\n retcode, diff_stdout_binary, _ = cmd_output(\n 'git', 'diff-index', '--ignore-submodules', '--binary',\n '--exit-code', '--no-color', '--no-ext-diff', tree, '--',\n retcode=None,\n encoding=None,\n )\n if retcode and 
diff_stdout_binary.strip():\n patch_filename = 'patch{}'.format(int(time.time()))\n patch_filename = os.path.join(patch_dir, patch_filename)\n logger.warning('Unstaged files detected.')\n logger.info(\n 'Stashing unstaged files to {}.'.format(patch_filename),\n )\n # Save the current unstaged changes as a patch\n mkdirp(patch_dir)\n with io.open(patch_filename, 'wb') as patch_file:\n patch_file.write(diff_stdout_binary)\n\n # Clear the working directory of unstaged changes\n cmd_output('git', 'checkout', '--', '.')\n try:\n yield\n finally:\n # Try to apply the patch we saved\n try:\n _git_apply(patch_filename)\n except CalledProcessError:\n logger.warning(\n 'Stashed changes conflicted with hook auto-fixes... '\n 'Rolling back fixes...',\n )\n # We failed to apply the patch, presumably due to fixes made\n # by hooks.\n # Roll back the changes made by hooks.\n cmd_output('git', 'checkout', '--', '.')\n _git_apply(patch_filename)\n logger.info('Restored changes from {}.'.format(patch_filename))\n else:\n # There weren't any staged files so we don't need to do anything\n # special\n yield\n\n\[email protected]\ndef staged_files_only(patch_dir):\n \"\"\"Clear any unstaged changes from the git working directory inside this\n context.\n \"\"\"\n with _intent_to_add_cleared(), _unstaged_changes_cleared(patch_dir):\n yield\n", "path": "pre_commit/staged_files_only.py"}]}
| 3,116 | 645 |
gh_patches_debug_43409
|
rasdani/github-patches
|
git_diff
|
encode__starlette-1220
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`StaticFiles` causes an Internal Server Error when a user accesses an existing file as a directory
### Checklist
<!-- Please make sure you check all these items before submitting your bug report. -->
- [x] The bug is reproducible against the latest release and/or `master`.
- [x] There are no similar issues or pull requests to fix it yet.
### Describe the bug
<!-- A clear and concise description of what the bug is. -->
`StaticFiles` causes an Internal Server Error when a user accesses an existing file as a directory (e.g. `/static/somefile.txt/foobar`)
### To reproduce
<!-- Provide a *minimal* example with steps to reproduce the bug locally.
NOTE: try to keep any external dependencies *at an absolute minimum*
(middleware, servers, proxies, certificates...).
In other words, remove anything that doesn't make the bug go away.
-->
1. create virtual env and activate it with `python -m venv venv && source venv/bin/activate`
2. install dependencies with `pip install starlette uvicorn aiofiles`
3. setup application as follows:
directory structure:
```
.
├── poc.py
├── static
│ └── sample.txt
└── venv
```
poc.py
```py
# code from https://www.starlette.io/staticfiles/
from starlette.applications import Starlette
from starlette.routing import Mount
from starlette.staticfiles import StaticFiles
routes = [
Mount("/static", app=StaticFiles(directory="static"), name="static"),
]
app = Starlette(routes=routes)
```
4. run application with `uvicorn poc:app`
5. access `http://127.0.0.1:8000/static/sample.txt/foo`
### Expected behavior
<!-- A clear and concise description of what you expected to happen. -->
`StaticFiles` returns "404 Not Found" HTTP response (as Apache HTTP Server and Nginx does).
### Actual behavior
<!-- A clear and concise description of what actually happens. -->
`StaticFiles` returns "500 Internal Server Error" HTTP response.
### Debugging material
<!-- Any tracebacks, screenshots, etc. that can help understanding the problem.
NOTE:
- Please list tracebacks in full (don't truncate them).
- Consider using `<details>` to make tracebacks/logs collapsible if they're very large (see https://gist.github.com/ericclemmons/b146fe5da72ca1f706b2ef72a20ac39d).
-->
console log with tracebacks:
```
INFO: Started server process [13052]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)
INFO: 127.0.0.1:64288 - "GET /static/sample.txt/foo HTTP/1.1" 500 Internal Server Error
ERROR: Exception in ASGI application
Traceback (most recent call last):
File "/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/uvicorn/protocols/http/h11_impl.py", line 394, in run_asgi
result = await app(self.scope, self.receive, self.send)
File "/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/uvicorn/middleware/proxy_headers.py", line 45, in __call__
return await self.app(scope, receive, send)
File "/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/applications.py", line 112, in __call__
await self.middleware_stack(scope, receive, send)
File "/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/middleware/errors.py", line 181, in __call__
raise exc from None
File "/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/middleware/errors.py", line 159, in __call__
await self.app(scope, receive, _send)
File "/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/exceptions.py", line 82, in __call__
raise exc from None
File "/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/exceptions.py", line 71, in __call__
await self.app(scope, receive, sender)
File "/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/routing.py", line 582, in __call__
await route.handle(scope, receive, send)
File "/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/routing.py", line 392, in handle
await self.app(scope, receive, send)
File "/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/staticfiles.py", line 97, in __call__
response = await self.get_response(path, scope)
File "/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/staticfiles.py", line 114, in get_response
full_path, stat_result = await self.lookup_path(path)
File "/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/staticfiles.py", line 154, in lookup_path
stat_result = await aio_stat(full_path)
File "/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/aiofiles/os.py", line 13, in run
return await loop.run_in_executor(executor, pfunc)
File "/usr/local/Cellar/[email protected]/3.9.1_2/Frameworks/Python.framework/Versions/3.9/lib/python3.9/concurrent/futures/thread.py", line 52, in run
result = self.fn(*self.args, **self.kwargs)
NotADirectoryError: [Errno 20] Not a directory: '/Users/xkhorasan/programs/test_starlette/static/sample.txt/foo'
```
### Environment
- OS: macOS
- Python version: 3.9.1
- Starlette version: 0.14.1
### Additional context
<!-- Any additional information that can help understanding the problem.
Eg. linked issues, or a description of what you were trying to achieve. -->
Apache HTTP Server and Nginx treat this case (accessing an existing file as a directory) as "404 Not Found".
Samples:
* Apache HTTP Server: https://httpd.apache.org/images/httpd_logo_wide_new.png/foo
* Nginx: https://nginx.org/download/nginx-1.18.0.tar.gz/foo
Return 404 Not Found for accessing file as directory pattern
fixes #1123
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `starlette/staticfiles.py`
Content:
```
1 import importlib.util
2 import os
3 import stat
4 import typing
5 from email.utils import parsedate
6
7 import anyio
8
9 from starlette.datastructures import URL, Headers
10 from starlette.responses import (
11 FileResponse,
12 PlainTextResponse,
13 RedirectResponse,
14 Response,
15 )
16 from starlette.types import Receive, Scope, Send
17
18 PathLike = typing.Union[str, "os.PathLike[str]"]
19
20
21 class NotModifiedResponse(Response):
22 NOT_MODIFIED_HEADERS = (
23 "cache-control",
24 "content-location",
25 "date",
26 "etag",
27 "expires",
28 "vary",
29 )
30
31 def __init__(self, headers: Headers):
32 super().__init__(
33 status_code=304,
34 headers={
35 name: value
36 for name, value in headers.items()
37 if name in self.NOT_MODIFIED_HEADERS
38 },
39 )
40
41
42 class StaticFiles:
43 def __init__(
44 self,
45 *,
46 directory: PathLike = None,
47 packages: typing.List[str] = None,
48 html: bool = False,
49 check_dir: bool = True,
50 ) -> None:
51 self.directory = directory
52 self.packages = packages
53 self.all_directories = self.get_directories(directory, packages)
54 self.html = html
55 self.config_checked = False
56 if check_dir and directory is not None and not os.path.isdir(directory):
57 raise RuntimeError(f"Directory '{directory}' does not exist")
58
59 def get_directories(
60 self, directory: PathLike = None, packages: typing.List[str] = None
61 ) -> typing.List[PathLike]:
62 """
63 Given `directory` and `packages` arguments, return a list of all the
64 directories that should be used for serving static files from.
65 """
66 directories = []
67 if directory is not None:
68 directories.append(directory)
69
70 for package in packages or []:
71 spec = importlib.util.find_spec(package)
72 assert spec is not None, f"Package {package!r} could not be found."
73 assert (
74 spec.origin is not None
75 ), f"Directory 'statics' in package {package!r} could not be found."
76 package_directory = os.path.normpath(
77 os.path.join(spec.origin, "..", "statics")
78 )
79 assert os.path.isdir(
80 package_directory
81 ), f"Directory 'statics' in package {package!r} could not be found."
82 directories.append(package_directory)
83
84 return directories
85
86 async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
87 """
88 The ASGI entry point.
89 """
90 assert scope["type"] == "http"
91
92 if not self.config_checked:
93 await self.check_config()
94 self.config_checked = True
95
96 path = self.get_path(scope)
97 response = await self.get_response(path, scope)
98 await response(scope, receive, send)
99
100 def get_path(self, scope: Scope) -> str:
101 """
102 Given the ASGI scope, return the `path` string to serve up,
103 with OS specific path seperators, and any '..', '.' components removed.
104 """
105 return os.path.normpath(os.path.join(*scope["path"].split("/")))
106
107 async def get_response(self, path: str, scope: Scope) -> Response:
108 """
109 Returns an HTTP response, given the incoming path, method and request headers.
110 """
111 if scope["method"] not in ("GET", "HEAD"):
112 return PlainTextResponse("Method Not Allowed", status_code=405)
113
114 full_path, stat_result = await self.lookup_path(path)
115
116 if stat_result and stat.S_ISREG(stat_result.st_mode):
117 # We have a static file to serve.
118 return self.file_response(full_path, stat_result, scope)
119
120 elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:
121 # We're in HTML mode, and have got a directory URL.
122 # Check if we have 'index.html' file to serve.
123 index_path = os.path.join(path, "index.html")
124 full_path, stat_result = await self.lookup_path(index_path)
125 if stat_result is not None and stat.S_ISREG(stat_result.st_mode):
126 if not scope["path"].endswith("/"):
127 # Directory URLs should redirect to always end in "/".
128 url = URL(scope=scope)
129 url = url.replace(path=url.path + "/")
130 return RedirectResponse(url=url)
131 return self.file_response(full_path, stat_result, scope)
132
133 if self.html:
134 # Check for '404.html' if we're in HTML mode.
135 full_path, stat_result = await self.lookup_path("404.html")
136 if stat_result is not None and stat.S_ISREG(stat_result.st_mode):
137 return FileResponse(
138 full_path,
139 stat_result=stat_result,
140 method=scope["method"],
141 status_code=404,
142 )
143
144 return PlainTextResponse("Not Found", status_code=404)
145
146 async def lookup_path(
147 self, path: str
148 ) -> typing.Tuple[str, typing.Optional[os.stat_result]]:
149 for directory in self.all_directories:
150 full_path = os.path.realpath(os.path.join(directory, path))
151 directory = os.path.realpath(directory)
152 if os.path.commonprefix([full_path, directory]) != directory:
153 # Don't allow misbehaving clients to break out of the static files
154 # directory.
155 continue
156 try:
157 stat_result = await anyio.to_thread.run_sync(os.stat, full_path)
158 return full_path, stat_result
159 except FileNotFoundError:
160 pass
161 return "", None
162
163 def file_response(
164 self,
165 full_path: PathLike,
166 stat_result: os.stat_result,
167 scope: Scope,
168 status_code: int = 200,
169 ) -> Response:
170 method = scope["method"]
171 request_headers = Headers(scope=scope)
172
173 response = FileResponse(
174 full_path, status_code=status_code, stat_result=stat_result, method=method
175 )
176 if self.is_not_modified(response.headers, request_headers):
177 return NotModifiedResponse(response.headers)
178 return response
179
180 async def check_config(self) -> None:
181 """
182 Perform a one-off configuration check that StaticFiles is actually
183 pointed at a directory, so that we can raise loud errors rather than
184 just returning 404 responses.
185 """
186 if self.directory is None:
187 return
188
189 try:
190 stat_result = await anyio.to_thread.run_sync(os.stat, self.directory)
191 except FileNotFoundError:
192 raise RuntimeError(
193 f"StaticFiles directory '{self.directory}' does not exist."
194 )
195 if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):
196 raise RuntimeError(
197 f"StaticFiles path '{self.directory}' is not a directory."
198 )
199
200 def is_not_modified(
201 self, response_headers: Headers, request_headers: Headers
202 ) -> bool:
203 """
204 Given the request and response headers, return `True` if an HTTP
205 "Not Modified" response could be returned instead.
206 """
207 try:
208 if_none_match = request_headers["if-none-match"]
209 etag = response_headers["etag"]
210 if if_none_match == etag:
211 return True
212 except KeyError:
213 pass
214
215 try:
216 if_modified_since = parsedate(request_headers["if-modified-since"])
217 last_modified = parsedate(response_headers["last-modified"])
218 if (
219 if_modified_since is not None
220 and last_modified is not None
221 and if_modified_since >= last_modified
222 ):
223 return True
224 except KeyError:
225 pass
226
227 return False
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py
--- a/starlette/staticfiles.py
+++ b/starlette/staticfiles.py
@@ -7,12 +7,8 @@
import anyio
from starlette.datastructures import URL, Headers
-from starlette.responses import (
- FileResponse,
- PlainTextResponse,
- RedirectResponse,
- Response,
-)
+from starlette.exceptions import HTTPException
+from starlette.responses import FileResponse, RedirectResponse, Response
from starlette.types import Receive, Scope, Send
PathLike = typing.Union[str, "os.PathLike[str]"]
@@ -109,9 +105,30 @@
Returns an HTTP response, given the incoming path, method and request headers.
"""
if scope["method"] not in ("GET", "HEAD"):
- return PlainTextResponse("Method Not Allowed", status_code=405)
+ raise HTTPException(status_code=405)
- full_path, stat_result = await self.lookup_path(path)
+ try:
+ full_path, stat_result = await anyio.to_thread.run_sync(
+ self.lookup_path, path
+ )
+ except (FileNotFoundError, NotADirectoryError):
+ if self.html:
+ # Check for '404.html' if we're in HTML mode.
+ full_path, stat_result = await anyio.to_thread.run_sync(
+ self.lookup_path, "404.html"
+ )
+ if stat_result and stat.S_ISREG(stat_result.st_mode):
+ return FileResponse(
+ full_path,
+ stat_result=stat_result,
+ method=scope["method"],
+ status_code=404,
+ )
+ raise HTTPException(status_code=404)
+ except PermissionError:
+ raise HTTPException(status_code=401)
+ except OSError:
+ raise
if stat_result and stat.S_ISREG(stat_result.st_mode):
# We have a static file to serve.
@@ -121,7 +138,9 @@
# We're in HTML mode, and have got a directory URL.
# Check if we have 'index.html' file to serve.
index_path = os.path.join(path, "index.html")
- full_path, stat_result = await self.lookup_path(index_path)
+ full_path, stat_result = await anyio.to_thread.run_sync(
+ self.lookup_path, index_path
+ )
if stat_result is not None and stat.S_ISREG(stat_result.st_mode):
if not scope["path"].endswith("/"):
# Directory URLs should redirect to always end in "/".
@@ -130,20 +149,9 @@
return RedirectResponse(url=url)
return self.file_response(full_path, stat_result, scope)
- if self.html:
- # Check for '404.html' if we're in HTML mode.
- full_path, stat_result = await self.lookup_path("404.html")
- if stat_result is not None and stat.S_ISREG(stat_result.st_mode):
- return FileResponse(
- full_path,
- stat_result=stat_result,
- method=scope["method"],
- status_code=404,
- )
-
- return PlainTextResponse("Not Found", status_code=404)
+ raise HTTPException(status_code=404)
- async def lookup_path(
+ def lookup_path(
self, path: str
) -> typing.Tuple[str, typing.Optional[os.stat_result]]:
for directory in self.all_directories:
@@ -153,11 +161,7 @@
# Don't allow misbehaving clients to break out of the static files
# directory.
continue
- try:
- stat_result = await anyio.to_thread.run_sync(os.stat, full_path)
- return full_path, stat_result
- except FileNotFoundError:
- pass
+ return full_path, os.stat(full_path)
return "", None
def file_response(
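For reference, a minimal regression check for the behaviour fixed above (a sketch, assuming the patch is applied, Starlette's TestClient is available, and the `static/sample.txt` layout from the issue's reproduction steps exists) could look like:

```python
from starlette.applications import Starlette
from starlette.routing import Mount
from starlette.staticfiles import StaticFiles
from starlette.testclient import TestClient

routes = [Mount("/static", app=StaticFiles(directory="static"), name="static")]
app = Starlette(routes=routes)
client = TestClient(app)

# Accessing an existing file as if it were a directory should now yield a plain 404
# (via HTTPException) instead of a 500 from an uncaught NotADirectoryError.
assert client.get("/static/sample.txt/foo").status_code == 404
```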
|
{"golden_diff": "diff --git a/starlette/staticfiles.py b/starlette/staticfiles.py\n--- a/starlette/staticfiles.py\n+++ b/starlette/staticfiles.py\n@@ -7,12 +7,8 @@\n import anyio\n \n from starlette.datastructures import URL, Headers\n-from starlette.responses import (\n- FileResponse,\n- PlainTextResponse,\n- RedirectResponse,\n- Response,\n-)\n+from starlette.exceptions import HTTPException\n+from starlette.responses import FileResponse, RedirectResponse, Response\n from starlette.types import Receive, Scope, Send\n \n PathLike = typing.Union[str, \"os.PathLike[str]\"]\n@@ -109,9 +105,30 @@\n Returns an HTTP response, given the incoming path, method and request headers.\n \"\"\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n- return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n+ raise HTTPException(status_code=405)\n \n- full_path, stat_result = await self.lookup_path(path)\n+ try:\n+ full_path, stat_result = await anyio.to_thread.run_sync(\n+ self.lookup_path, path\n+ )\n+ except (FileNotFoundError, NotADirectoryError):\n+ if self.html:\n+ # Check for '404.html' if we're in HTML mode.\n+ full_path, stat_result = await anyio.to_thread.run_sync(\n+ self.lookup_path, \"404.html\"\n+ )\n+ if stat_result and stat.S_ISREG(stat_result.st_mode):\n+ return FileResponse(\n+ full_path,\n+ stat_result=stat_result,\n+ method=scope[\"method\"],\n+ status_code=404,\n+ )\n+ raise HTTPException(status_code=404)\n+ except PermissionError:\n+ raise HTTPException(status_code=401)\n+ except OSError:\n+ raise\n \n if stat_result and stat.S_ISREG(stat_result.st_mode):\n # We have a static file to serve.\n@@ -121,7 +138,9 @@\n # We're in HTML mode, and have got a directory URL.\n # Check if we have 'index.html' file to serve.\n index_path = os.path.join(path, \"index.html\")\n- full_path, stat_result = await self.lookup_path(index_path)\n+ full_path, stat_result = await anyio.to_thread.run_sync(\n+ self.lookup_path, index_path\n+ )\n if stat_result is not None and stat.S_ISREG(stat_result.st_mode):\n if not scope[\"path\"].endswith(\"/\"):\n # Directory URLs should redirect to always end in \"/\".\n@@ -130,20 +149,9 @@\n return RedirectResponse(url=url)\n return self.file_response(full_path, stat_result, scope)\n \n- if self.html:\n- # Check for '404.html' if we're in HTML mode.\n- full_path, stat_result = await self.lookup_path(\"404.html\")\n- if stat_result is not None and stat.S_ISREG(stat_result.st_mode):\n- return FileResponse(\n- full_path,\n- stat_result=stat_result,\n- method=scope[\"method\"],\n- status_code=404,\n- )\n-\n- return PlainTextResponse(\"Not Found\", status_code=404)\n+ raise HTTPException(status_code=404)\n \n- async def lookup_path(\n+ def lookup_path(\n self, path: str\n ) -> typing.Tuple[str, typing.Optional[os.stat_result]]:\n for directory in self.all_directories:\n@@ -153,11 +161,7 @@\n # Don't allow misbehaving clients to break out of the static files\n # directory.\n continue\n- try:\n- stat_result = await anyio.to_thread.run_sync(os.stat, full_path)\n- return full_path, stat_result\n- except FileNotFoundError:\n- pass\n+ return full_path, os.stat(full_path)\n return \"\", None\n \n def file_response(\n", "issue": "`StaticFiles` causes Internal Server Error when user accesses existing files as directory\n### Checklist\r\n\r\n<!-- Please make sure you check all these items before submitting your bug report. 
-->\r\n\r\n- [x] The bug is reproducible against the latest release and/or `master`.\r\n- [x] There are no similar issues or pull requests to fix it yet.\r\n\r\n### Describe the bug\r\n\r\n<!-- A clear and concise description of what the bug is. -->\r\n`StaticFiles` causes Internal Server Error when user accesses existing files as directory (e.g. `/static/somefile.txt/foobar`)\r\n\r\n### To reproduce\r\n\r\n<!-- Provide a *minimal* example with steps to reproduce the bug locally.\r\n\r\nNOTE: try to keep any external dependencies *at an absolute minimum*\r\n(middleware, servers, proxies, certificates...).\r\nIn other words, remove anything that doesn't make the bug go away.\r\n-->\r\n\r\n1. create virtual env and activate it with `python -m venv venv && source venv/bin/activate`\r\n2. install dependencies with `pip install starlette uvicorn aiofiles`\r\n3. setup application as follows:\r\n\r\ndirectory structure:\r\n```\r\n.\r\n\u251c\u2500\u2500 poc.py\r\n\u251c\u2500\u2500 static\r\n\u2502 \u2514\u2500\u2500 sample.txt\r\n\u2514\u2500\u2500 venv\r\n```\r\n\r\npoc.py\r\n```py\r\n# code from https://www.starlette.io/staticfiles/\r\nfrom starlette.applications import Starlette\r\nfrom starlette.routing import Mount\r\nfrom starlette.staticfiles import StaticFiles\r\n\r\n\r\nroutes = [\r\n Mount(\"/static\", app=StaticFiles(directory=\"static\"), name=\"static\"),\r\n]\r\n\r\napp = Starlette(routes=routes)\r\n```\r\n\r\n4. run application with `uvicorn poc:app`\r\n5. access `http://127.0.0.1:8000/static/sample.txt/foo`\r\n\r\n### Expected behavior\r\n\r\n<!-- A clear and concise description of what you expected to happen. -->\r\n`StaticFiles` returns \"404 Not Found\" HTTP response (as Apache HTTP Server and Nginx does).\r\n\r\n### Actual behavior\r\n\r\n<!-- A clear and concise description of what actually happens. -->\r\n`StaticFiles` returns \"500 Internal Server Error\" HTTP response.\r\n\r\n### Debugging material\r\n\r\n<!-- Any tracebacks, screenshots, etc. 
that can help understanding the problem.\r\n\r\nNOTE:\r\n- Please list tracebacks in full (don't truncate them).\r\n- Consider using `<details>` to make tracebacks/logs collapsible if they're very large (see https://gist.github.com/ericclemmons/b146fe5da72ca1f706b2ef72a20ac39d).\r\n-->\r\nconsole log with tracebacks:\r\n```\r\nINFO: Started server process [13052]\r\nINFO: Waiting for application startup.\r\nINFO: Application startup complete.\r\nINFO: Uvicorn running on http://127.0.0.1:8000 (Press CTRL+C to quit)\r\nINFO: 127.0.0.1:64288 - \"GET /static/sample.txt/foo HTTP/1.1\" 500 Internal Server Error\r\nERROR: Exception in ASGI application\r\nTraceback (most recent call last):\r\n File \"/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/uvicorn/protocols/http/h11_impl.py\", line 394, in run_asgi\r\n result = await app(self.scope, self.receive, self.send)\r\n File \"/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/uvicorn/middleware/proxy_headers.py\", line 45, in __call__\r\n return await self.app(scope, receive, send)\r\n File \"/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/applications.py\", line 112, in __call__\r\n await self.middleware_stack(scope, receive, send)\r\n File \"/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/middleware/errors.py\", line 181, in __call__\r\n raise exc from None\r\n File \"/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/middleware/errors.py\", line 159, in __call__\r\n await self.app(scope, receive, _send)\r\n File \"/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/exceptions.py\", line 82, in __call__\r\n raise exc from None\r\n File \"/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/exceptions.py\", line 71, in __call__\r\n await self.app(scope, receive, sender)\r\n File \"/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/routing.py\", line 582, in __call__\r\n await route.handle(scope, receive, send)\r\n File \"/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/routing.py\", line 392, in handle\r\n await self.app(scope, receive, send)\r\n File \"/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/staticfiles.py\", line 97, in __call__\r\n response = await self.get_response(path, scope)\r\n File \"/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/staticfiles.py\", line 114, in get_response\r\n full_path, stat_result = await self.lookup_path(path)\r\n File \"/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/starlette/staticfiles.py\", line 154, in lookup_path\r\n stat_result = await aio_stat(full_path)\r\n File \"/Users/xkhorasan/programs/test_starlette/venv/lib/python3.9/site-packages/aiofiles/os.py\", line 13, in run\r\n return await loop.run_in_executor(executor, pfunc)\r\n File \"/usr/local/Cellar/[email protected]/3.9.1_2/Frameworks/Python.framework/Versions/3.9/lib/python3.9/concurrent/futures/thread.py\", line 52, in run\r\n result = self.fn(*self.args, **self.kwargs)\r\nNotADirectoryError: [Errno 20] Not a directory: '/Users/xkhorasan/programs/test_starlette/static/sample.txt/foo'\r\n```\r\n\r\n### Environment\r\n\r\n- OS: macOS\r\n- Python version: 3.9.1\r\n- Starlette version: 0.14.1\r\n\r\n### Additional context\r\n\r\n<!-- Any additional information that can 
help understanding the problem.\r\n\r\nEg. linked issues, or a description of what you were trying to achieve. -->\r\n\r\nApache HTTP Server and Nginx treat this case (access existing file as directory) as \"404 Not Found\".\r\nSamples:\r\n* Apache HTTP Server: https://httpd.apache.org/images/httpd_logo_wide_new.png/foo\r\n* Nginx: https://nginx.org/download/nginx-1.18.0.tar.gz/foo\nReturn 404 Not Found for accessing file as directory pattern\nfixes #1123\n", "before_files": [{"content": "import importlib.util\nimport os\nimport stat\nimport typing\nfrom email.utils import parsedate\n\nimport anyio\n\nfrom starlette.datastructures import URL, Headers\nfrom starlette.responses import (\n FileResponse,\n PlainTextResponse,\n RedirectResponse,\n Response,\n)\nfrom starlette.types import Receive, Scope, Send\n\nPathLike = typing.Union[str, \"os.PathLike[str]\"]\n\n\nclass NotModifiedResponse(Response):\n NOT_MODIFIED_HEADERS = (\n \"cache-control\",\n \"content-location\",\n \"date\",\n \"etag\",\n \"expires\",\n \"vary\",\n )\n\n def __init__(self, headers: Headers):\n super().__init__(\n status_code=304,\n headers={\n name: value\n for name, value in headers.items()\n if name in self.NOT_MODIFIED_HEADERS\n },\n )\n\n\nclass StaticFiles:\n def __init__(\n self,\n *,\n directory: PathLike = None,\n packages: typing.List[str] = None,\n html: bool = False,\n check_dir: bool = True,\n ) -> None:\n self.directory = directory\n self.packages = packages\n self.all_directories = self.get_directories(directory, packages)\n self.html = html\n self.config_checked = False\n if check_dir and directory is not None and not os.path.isdir(directory):\n raise RuntimeError(f\"Directory '{directory}' does not exist\")\n\n def get_directories(\n self, directory: PathLike = None, packages: typing.List[str] = None\n ) -> typing.List[PathLike]:\n \"\"\"\n Given `directory` and `packages` arguments, return a list of all the\n directories that should be used for serving static files from.\n \"\"\"\n directories = []\n if directory is not None:\n directories.append(directory)\n\n for package in packages or []:\n spec = importlib.util.find_spec(package)\n assert spec is not None, f\"Package {package!r} could not be found.\"\n assert (\n spec.origin is not None\n ), f\"Directory 'statics' in package {package!r} could not be found.\"\n package_directory = os.path.normpath(\n os.path.join(spec.origin, \"..\", \"statics\")\n )\n assert os.path.isdir(\n package_directory\n ), f\"Directory 'statics' in package {package!r} could not be found.\"\n directories.append(package_directory)\n\n return directories\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"\n The ASGI entry point.\n \"\"\"\n assert scope[\"type\"] == \"http\"\n\n if not self.config_checked:\n await self.check_config()\n self.config_checked = True\n\n path = self.get_path(scope)\n response = await self.get_response(path, scope)\n await response(scope, receive, send)\n\n def get_path(self, scope: Scope) -> str:\n \"\"\"\n Given the ASGI scope, return the `path` string to serve up,\n with OS specific path seperators, and any '..', '.' 
components removed.\n \"\"\"\n return os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n\n async def get_response(self, path: str, scope: Scope) -> Response:\n \"\"\"\n Returns an HTTP response, given the incoming path, method and request headers.\n \"\"\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n return PlainTextResponse(\"Method Not Allowed\", status_code=405)\n\n full_path, stat_result = await self.lookup_path(path)\n\n if stat_result and stat.S_ISREG(stat_result.st_mode):\n # We have a static file to serve.\n return self.file_response(full_path, stat_result, scope)\n\n elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:\n # We're in HTML mode, and have got a directory URL.\n # Check if we have 'index.html' file to serve.\n index_path = os.path.join(path, \"index.html\")\n full_path, stat_result = await self.lookup_path(index_path)\n if stat_result is not None and stat.S_ISREG(stat_result.st_mode):\n if not scope[\"path\"].endswith(\"/\"):\n # Directory URLs should redirect to always end in \"/\".\n url = URL(scope=scope)\n url = url.replace(path=url.path + \"/\")\n return RedirectResponse(url=url)\n return self.file_response(full_path, stat_result, scope)\n\n if self.html:\n # Check for '404.html' if we're in HTML mode.\n full_path, stat_result = await self.lookup_path(\"404.html\")\n if stat_result is not None and stat.S_ISREG(stat_result.st_mode):\n return FileResponse(\n full_path,\n stat_result=stat_result,\n method=scope[\"method\"],\n status_code=404,\n )\n\n return PlainTextResponse(\"Not Found\", status_code=404)\n\n async def lookup_path(\n self, path: str\n ) -> typing.Tuple[str, typing.Optional[os.stat_result]]:\n for directory in self.all_directories:\n full_path = os.path.realpath(os.path.join(directory, path))\n directory = os.path.realpath(directory)\n if os.path.commonprefix([full_path, directory]) != directory:\n # Don't allow misbehaving clients to break out of the static files\n # directory.\n continue\n try:\n stat_result = await anyio.to_thread.run_sync(os.stat, full_path)\n return full_path, stat_result\n except FileNotFoundError:\n pass\n return \"\", None\n\n def file_response(\n self,\n full_path: PathLike,\n stat_result: os.stat_result,\n scope: Scope,\n status_code: int = 200,\n ) -> Response:\n method = scope[\"method\"]\n request_headers = Headers(scope=scope)\n\n response = FileResponse(\n full_path, status_code=status_code, stat_result=stat_result, method=method\n )\n if self.is_not_modified(response.headers, request_headers):\n return NotModifiedResponse(response.headers)\n return response\n\n async def check_config(self) -> None:\n \"\"\"\n Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, so that we can raise loud errors rather than\n just returning 404 responses.\n \"\"\"\n if self.directory is None:\n return\n\n try:\n stat_result = await anyio.to_thread.run_sync(os.stat, self.directory)\n except FileNotFoundError:\n raise RuntimeError(\n f\"StaticFiles directory '{self.directory}' does not exist.\"\n )\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\n f\"StaticFiles path '{self.directory}' is not a directory.\"\n )\n\n def is_not_modified(\n self, response_headers: Headers, request_headers: Headers\n ) -> bool:\n \"\"\"\n Given the request and response headers, return `True` if an HTTP\n \"Not Modified\" response could be returned instead.\n \"\"\"\n try:\n if_none_match = request_headers[\"if-none-match\"]\n 
etag = response_headers[\"etag\"]\n if if_none_match == etag:\n return True\n except KeyError:\n pass\n\n try:\n if_modified_since = parsedate(request_headers[\"if-modified-since\"])\n last_modified = parsedate(response_headers[\"last-modified\"])\n if (\n if_modified_since is not None\n and last_modified is not None\n and if_modified_since >= last_modified\n ):\n return True\n except KeyError:\n pass\n\n return False\n", "path": "starlette/staticfiles.py"}], "after_files": [{"content": "import importlib.util\nimport os\nimport stat\nimport typing\nfrom email.utils import parsedate\n\nimport anyio\n\nfrom starlette.datastructures import URL, Headers\nfrom starlette.exceptions import HTTPException\nfrom starlette.responses import FileResponse, RedirectResponse, Response\nfrom starlette.types import Receive, Scope, Send\n\nPathLike = typing.Union[str, \"os.PathLike[str]\"]\n\n\nclass NotModifiedResponse(Response):\n NOT_MODIFIED_HEADERS = (\n \"cache-control\",\n \"content-location\",\n \"date\",\n \"etag\",\n \"expires\",\n \"vary\",\n )\n\n def __init__(self, headers: Headers):\n super().__init__(\n status_code=304,\n headers={\n name: value\n for name, value in headers.items()\n if name in self.NOT_MODIFIED_HEADERS\n },\n )\n\n\nclass StaticFiles:\n def __init__(\n self,\n *,\n directory: PathLike = None,\n packages: typing.List[str] = None,\n html: bool = False,\n check_dir: bool = True,\n ) -> None:\n self.directory = directory\n self.packages = packages\n self.all_directories = self.get_directories(directory, packages)\n self.html = html\n self.config_checked = False\n if check_dir and directory is not None and not os.path.isdir(directory):\n raise RuntimeError(f\"Directory '{directory}' does not exist\")\n\n def get_directories(\n self, directory: PathLike = None, packages: typing.List[str] = None\n ) -> typing.List[PathLike]:\n \"\"\"\n Given `directory` and `packages` arguments, return a list of all the\n directories that should be used for serving static files from.\n \"\"\"\n directories = []\n if directory is not None:\n directories.append(directory)\n\n for package in packages or []:\n spec = importlib.util.find_spec(package)\n assert spec is not None, f\"Package {package!r} could not be found.\"\n assert (\n spec.origin is not None\n ), f\"Directory 'statics' in package {package!r} could not be found.\"\n package_directory = os.path.normpath(\n os.path.join(spec.origin, \"..\", \"statics\")\n )\n assert os.path.isdir(\n package_directory\n ), f\"Directory 'statics' in package {package!r} could not be found.\"\n directories.append(package_directory)\n\n return directories\n\n async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:\n \"\"\"\n The ASGI entry point.\n \"\"\"\n assert scope[\"type\"] == \"http\"\n\n if not self.config_checked:\n await self.check_config()\n self.config_checked = True\n\n path = self.get_path(scope)\n response = await self.get_response(path, scope)\n await response(scope, receive, send)\n\n def get_path(self, scope: Scope) -> str:\n \"\"\"\n Given the ASGI scope, return the `path` string to serve up,\n with OS specific path seperators, and any '..', '.' 
components removed.\n \"\"\"\n return os.path.normpath(os.path.join(*scope[\"path\"].split(\"/\")))\n\n async def get_response(self, path: str, scope: Scope) -> Response:\n \"\"\"\n Returns an HTTP response, given the incoming path, method and request headers.\n \"\"\"\n if scope[\"method\"] not in (\"GET\", \"HEAD\"):\n raise HTTPException(status_code=405)\n\n try:\n full_path, stat_result = await anyio.to_thread.run_sync(\n self.lookup_path, path\n )\n except (FileNotFoundError, NotADirectoryError):\n if self.html:\n # Check for '404.html' if we're in HTML mode.\n full_path, stat_result = await anyio.to_thread.run_sync(\n self.lookup_path, \"404.html\"\n )\n if stat_result and stat.S_ISREG(stat_result.st_mode):\n return FileResponse(\n full_path,\n stat_result=stat_result,\n method=scope[\"method\"],\n status_code=404,\n )\n raise HTTPException(status_code=404)\n except PermissionError:\n raise HTTPException(status_code=401)\n except OSError:\n raise\n\n if stat_result and stat.S_ISREG(stat_result.st_mode):\n # We have a static file to serve.\n return self.file_response(full_path, stat_result, scope)\n\n elif stat_result and stat.S_ISDIR(stat_result.st_mode) and self.html:\n # We're in HTML mode, and have got a directory URL.\n # Check if we have 'index.html' file to serve.\n index_path = os.path.join(path, \"index.html\")\n full_path, stat_result = await anyio.to_thread.run_sync(\n self.lookup_path, index_path\n )\n if stat_result is not None and stat.S_ISREG(stat_result.st_mode):\n if not scope[\"path\"].endswith(\"/\"):\n # Directory URLs should redirect to always end in \"/\".\n url = URL(scope=scope)\n url = url.replace(path=url.path + \"/\")\n return RedirectResponse(url=url)\n return self.file_response(full_path, stat_result, scope)\n\n raise HTTPException(status_code=404)\n\n def lookup_path(\n self, path: str\n ) -> typing.Tuple[str, typing.Optional[os.stat_result]]:\n for directory in self.all_directories:\n full_path = os.path.realpath(os.path.join(directory, path))\n directory = os.path.realpath(directory)\n if os.path.commonprefix([full_path, directory]) != directory:\n # Don't allow misbehaving clients to break out of the static files\n # directory.\n continue\n return full_path, os.stat(full_path)\n return \"\", None\n\n def file_response(\n self,\n full_path: PathLike,\n stat_result: os.stat_result,\n scope: Scope,\n status_code: int = 200,\n ) -> Response:\n method = scope[\"method\"]\n request_headers = Headers(scope=scope)\n\n response = FileResponse(\n full_path, status_code=status_code, stat_result=stat_result, method=method\n )\n if self.is_not_modified(response.headers, request_headers):\n return NotModifiedResponse(response.headers)\n return response\n\n async def check_config(self) -> None:\n \"\"\"\n Perform a one-off configuration check that StaticFiles is actually\n pointed at a directory, so that we can raise loud errors rather than\n just returning 404 responses.\n \"\"\"\n if self.directory is None:\n return\n\n try:\n stat_result = await anyio.to_thread.run_sync(os.stat, self.directory)\n except FileNotFoundError:\n raise RuntimeError(\n f\"StaticFiles directory '{self.directory}' does not exist.\"\n )\n if not (stat.S_ISDIR(stat_result.st_mode) or stat.S_ISLNK(stat_result.st_mode)):\n raise RuntimeError(\n f\"StaticFiles path '{self.directory}' is not a directory.\"\n )\n\n def is_not_modified(\n self, response_headers: Headers, request_headers: Headers\n ) -> bool:\n \"\"\"\n Given the request and response headers, return `True` if an HTTP\n \"Not 
Modified\" response could be returned instead.\n \"\"\"\n try:\n if_none_match = request_headers[\"if-none-match\"]\n etag = response_headers[\"etag\"]\n if if_none_match == etag:\n return True\n except KeyError:\n pass\n\n try:\n if_modified_since = parsedate(request_headers[\"if-modified-since\"])\n last_modified = parsedate(response_headers[\"last-modified\"])\n if (\n if_modified_since is not None\n and last_modified is not None\n and if_modified_since >= last_modified\n ):\n return True\n except KeyError:\n pass\n\n return False\n", "path": "starlette/staticfiles.py"}]}
| 4,062 | 890 |
gh_patches_debug_3648
|
rasdani/github-patches
|
git_diff
|
joke2k__faker-1132
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
invalid pt_BR cellphone numbers being generated
* Faker version: 4.0.2
* OS: Ubuntu 16.04.6 LTS
If I got [MSISDN](https://en.wikipedia.org/wiki/MSISDN) right, and it is possible I did it wrong since I know nothing about telecom, they are meant only for cellphones and not landline phones. In Brazil, cellphones have now started to have a 9 in front of their digits. This was implemented by @rodrigondec in 941e06693ff8771d715d2f9f37d79a7f1b8fa8f4, but he added `5511########` to `msisdn_formats`.
If I got the mobile-versus-landline distinction right, all of the following lines generate invalid cellphone numbers:
```
'5511########',
'5521########',
'5531########',
'5541########',
'5551########',
'5561########',
'5571########',
'5581########',
'5584########',
```
### Steps to reproduce
1. Instantiate faker: `faker = Faker()`
2. call `len(faker.msisdn())`
### Expected behavior
The length should always be 13 for the pt_BR locale.
From ANATEL, the telecom national agency in Brazil: https://www.anatel.gov.br/Portal/exibirPortalPaginaEspecial.do;jsessionid=4CF5489B6943AFF3E2BDA192CC1B5220.site1?org.apache.struts.taglib.html.TOKEN=bbe01b15d1c58d2f938580db5547cb8e&acao=carregaPasta&codItemCanal=1722&pastaSelecionada=2831
> 1. Por que os números dos telefones celulares terão o nono dígito?
> Os números dos telefones celulares estão recebendo mais um dígito para atender à crescente demanda pelo serviço móvel no Brasil(....)
> 2. O nono dígito será adicionado aos números de todo o Brasil?
> O nono dígito será implementado em todo o País até o fim de 2016(...)
Translates to:
1. Why will the cell phone numbers have a 9th digit?
The cell phone numbers are receiving one more digit to address the demand growth of mobile service in Brazil...
2. Will the 9th digit be added to all numbers in Brazil?
The 9th digit will be implemented in the whole country by the end of 2016...
### Actual behavior
The length is sometimes 12.
--- END ISSUE ---
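As a quick way to observe the reported behaviour (a sketch, assuming Faker 4.0.x with the pt_BR locale selected explicitly, since the plain `Faker()` call in the steps above uses the default locale):

```python
from faker import Faker

fake = Faker("pt_BR")

# With the 12-digit patterns still present in msisdn_formats, some generated
# MSISDNs have only 12 characters instead of the expected 13.
lengths = {len(fake.msisdn()) for _ in range(1000)}
print(lengths)  # typically {12, 13} before the fix, {13} once only the 13-digit formats remain
```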
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `faker/providers/phone_number/pt_BR/__init__.py`
Content:
```
1 from .. import Provider as PhoneNumberProvider
2
3
4 class Provider(PhoneNumberProvider):
5 formats = (
6 '+55 (011) #### ####',
7 '+55 (021) #### ####',
8 '+55 (031) #### ####',
9 '+55 (041) #### ####',
10 '+55 (051) #### ####',
11 '+55 (061) #### ####',
12 '+55 (071) #### ####',
13 '+55 (081) #### ####',
14 '+55 (084) #### ####',
15 '+55 11 #### ####',
16 '+55 21 #### ####',
17 '+55 31 #### ####',
18 '+55 41 #### ####',
19 '+55 51 ### ####',
20 '+55 61 #### ####',
21 '+55 71 #### ####',
22 '+55 81 #### ####',
23 '+55 84 #### ####',
24 '+55 (011) ####-####',
25 '+55 (021) ####-####',
26 '+55 (031) ####-####',
27 '+55 (041) ####-####',
28 '+55 (051) ####-####',
29 '+55 (061) ####-####',
30 '+55 (071) ####-####',
31 '+55 (081) ####-####',
32 '+55 (084) ####-####',
33 '+55 11 ####-####',
34 '+55 21 ####-####',
35 '+55 31 ####-####',
36 '+55 41 ####-####',
37 '+55 51 ### ####',
38 '+55 61 ####-####',
39 '+55 71 ####-####',
40 '+55 81 ####-####',
41 '+55 84 ####-####',
42 '(011) #### ####',
43 '(021) #### ####',
44 '(031) #### ####',
45 '(041) #### ####',
46 '(051) #### ####',
47 '(061) #### ####',
48 '(071) #### ####',
49 '(081) #### ####',
50 '(084) #### ####',
51 '11 #### ####',
52 '21 #### ####',
53 '31 #### ####',
54 '41 #### ####',
55 '51 ### ####',
56 '61 #### ####',
57 '71 #### ####',
58 '81 #### ####',
59 '84 #### ####',
60 '(011) ####-####',
61 '(021) ####-####',
62 '(031) ####-####',
63 '(041) ####-####',
64 '(051) ####-####',
65 '(061) ####-####',
66 '(071) ####-####',
67 '(081) ####-####',
68 '(084) ####-####',
69 '11 ####-####',
70 '21 ####-####',
71 '31 ####-####',
72 '41 ####-####',
73 '51 ### ####',
74 '61 ####-####',
75 '71 ####-####',
76 '81 ####-####',
77 '84 ####-####',
78 )
79
80 msisdn_formats = (
81 '5511########',
82 '5521########',
83 '5531########',
84 '5541########',
85 '5551########',
86 '5561########',
87 '5571########',
88 '5581########',
89 '5584########',
90 '55119########',
91 '55219########',
92 '55319########',
93 '55419########',
94 '55519########',
95 '55619########',
96 '55719########',
97 '55819########',
98 '55849########',
99 )
100
101 cellphone_formats = (
102 '+55 ## 9#### ####',
103 '+55 ## 9 #### ####',
104 '+55 (0##) 9#### ####',
105 '+55 (##) 9#### ####',
106 '+55 (##) 9 #### ####',
107 '+55 ## 9####-####',
108 '+55 ## 9 ####-####',
109 '+55 (0##) 9####-####',
110 '+55 (##) 9####-####',
111 '+55 (##) 9 ####-####',
112 )
113
114 def cellphone_number(self):
115 pattern = self.random_element(self.cellphone_formats)
116 return self.numerify(self.generator.parse(pattern))
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/faker/providers/phone_number/pt_BR/__init__.py b/faker/providers/phone_number/pt_BR/__init__.py
--- a/faker/providers/phone_number/pt_BR/__init__.py
+++ b/faker/providers/phone_number/pt_BR/__init__.py
@@ -78,15 +78,6 @@
)
msisdn_formats = (
- '5511########',
- '5521########',
- '5531########',
- '5541########',
- '5551########',
- '5561########',
- '5571########',
- '5581########',
- '5584########',
'55119########',
'55219########',
'55319########',
|
{"golden_diff": "diff --git a/faker/providers/phone_number/pt_BR/__init__.py b/faker/providers/phone_number/pt_BR/__init__.py\n--- a/faker/providers/phone_number/pt_BR/__init__.py\n+++ b/faker/providers/phone_number/pt_BR/__init__.py\n@@ -78,15 +78,6 @@\n )\n \n msisdn_formats = (\n- '5511########',\n- '5521########',\n- '5531########',\n- '5541########',\n- '5551########',\n- '5561########',\n- '5571########',\n- '5581########',\n- '5584########',\n '55119########',\n '55219########',\n '55319########',\n", "issue": "invalid pt_BR cellphone numbers being generated\n* Faker version: 4.0.2\r\n* OS: Ubuntu 16.04.6 LTS\r\n\r\nIf I got [MSISDN](https://en.wikipedia.org/wiki/MSISDN) right, and it is possible I did it wrong since I know nothing about telecom, they are just meant to cellphones and not landline phones. In Brazil cellphones started now to have a 9 in front of its digits. This was implemented by @rodrigondec on 941e06693ff8771d715d2f9f37d79a7f1b8fa8f4 but he added `5511########` on `msisdn_formats`.\r\n\r\nIf I got the mobile and not landline thing right all the following lines are generating invalid cellphone numbers:\r\n```\r\n'5511########',\r\n'5521########',\r\n'5531########',\r\n'5541########',\r\n'5551########',\r\n'5561########',\r\n'5571########',\r\n'5581########',\r\n'5584########',\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n1. Instantiate faker: `faker = Faker()`\r\n2. call `len(faker.msisdn)`\r\n\r\n### Expected behavior\r\n\r\nThe length should always return 13 for pt_BR locales.\r\n\r\nFrom ANATEL, the telecom national agency in Brazil: https://www.anatel.gov.br/Portal/exibirPortalPaginaEspecial.do;jsessionid=4CF5489B6943AFF3E2BDA192CC1B5220.site1?org.apache.struts.taglib.html.TOKEN=bbe01b15d1c58d2f938580db5547cb8e&acao=carregaPasta&codItemCanal=1722&pastaSelecionada=2831\r\n> 1. Por que os n\u00fameros dos telefones celulares ter\u00e3o o nono d\u00edgito?\r\n> Os n\u00fameros dos telefones celulares est\u00e3o recebendo mais um d\u00edgito para atender \u00e0 crescente demanda pelo servi\u00e7o m\u00f3vel no Brasil(....)\r\n> 2. O nono d\u00edgito ser\u00e1 adicionado aos n\u00fameros de todo o Brasil?\r\n> O nono d\u00edgito ser\u00e1 implementado em todo o Pa\u00eds at\u00e9 o fim de 2016(...)\r\n\r\nTranslates to:\r\n1. Why the cell phone numbers will have a 9th digit?\r\nThe cell phone numbers are receiving one more digit to address the demand growth of mobile service in Brazil...\r\n2. The 9th digit will be added to all numbers in Brazil?\r\nThe 9th digit will be implemented in the whole country by the end of 2016...\r\n\r\n### Actual behavior\r\n\r\nthe length sometimes is 12\r\n\n", "before_files": [{"content": "from .. 
import Provider as PhoneNumberProvider\n\n\nclass Provider(PhoneNumberProvider):\n formats = (\n '+55 (011) #### ####',\n '+55 (021) #### ####',\n '+55 (031) #### ####',\n '+55 (041) #### ####',\n '+55 (051) #### ####',\n '+55 (061) #### ####',\n '+55 (071) #### ####',\n '+55 (081) #### ####',\n '+55 (084) #### ####',\n '+55 11 #### ####',\n '+55 21 #### ####',\n '+55 31 #### ####',\n '+55 41 #### ####',\n '+55 51 ### ####',\n '+55 61 #### ####',\n '+55 71 #### ####',\n '+55 81 #### ####',\n '+55 84 #### ####',\n '+55 (011) ####-####',\n '+55 (021) ####-####',\n '+55 (031) ####-####',\n '+55 (041) ####-####',\n '+55 (051) ####-####',\n '+55 (061) ####-####',\n '+55 (071) ####-####',\n '+55 (081) ####-####',\n '+55 (084) ####-####',\n '+55 11 ####-####',\n '+55 21 ####-####',\n '+55 31 ####-####',\n '+55 41 ####-####',\n '+55 51 ### ####',\n '+55 61 ####-####',\n '+55 71 ####-####',\n '+55 81 ####-####',\n '+55 84 ####-####',\n '(011) #### ####',\n '(021) #### ####',\n '(031) #### ####',\n '(041) #### ####',\n '(051) #### ####',\n '(061) #### ####',\n '(071) #### ####',\n '(081) #### ####',\n '(084) #### ####',\n '11 #### ####',\n '21 #### ####',\n '31 #### ####',\n '41 #### ####',\n '51 ### ####',\n '61 #### ####',\n '71 #### ####',\n '81 #### ####',\n '84 #### ####',\n '(011) ####-####',\n '(021) ####-####',\n '(031) ####-####',\n '(041) ####-####',\n '(051) ####-####',\n '(061) ####-####',\n '(071) ####-####',\n '(081) ####-####',\n '(084) ####-####',\n '11 ####-####',\n '21 ####-####',\n '31 ####-####',\n '41 ####-####',\n '51 ### ####',\n '61 ####-####',\n '71 ####-####',\n '81 ####-####',\n '84 ####-####',\n )\n\n msisdn_formats = (\n '5511########',\n '5521########',\n '5531########',\n '5541########',\n '5551########',\n '5561########',\n '5571########',\n '5581########',\n '5584########',\n '55119########',\n '55219########',\n '55319########',\n '55419########',\n '55519########',\n '55619########',\n '55719########',\n '55819########',\n '55849########',\n )\n\n cellphone_formats = (\n '+55 ## 9#### ####',\n '+55 ## 9 #### ####',\n '+55 (0##) 9#### ####',\n '+55 (##) 9#### ####',\n '+55 (##) 9 #### ####',\n '+55 ## 9####-####',\n '+55 ## 9 ####-####',\n '+55 (0##) 9####-####',\n '+55 (##) 9####-####',\n '+55 (##) 9 ####-####',\n )\n\n def cellphone_number(self):\n pattern = self.random_element(self.cellphone_formats)\n return self.numerify(self.generator.parse(pattern))\n", "path": "faker/providers/phone_number/pt_BR/__init__.py"}], "after_files": [{"content": "from .. 
import Provider as PhoneNumberProvider\n\n\nclass Provider(PhoneNumberProvider):\n formats = (\n '+55 (011) #### ####',\n '+55 (021) #### ####',\n '+55 (031) #### ####',\n '+55 (041) #### ####',\n '+55 (051) #### ####',\n '+55 (061) #### ####',\n '+55 (071) #### ####',\n '+55 (081) #### ####',\n '+55 (084) #### ####',\n '+55 11 #### ####',\n '+55 21 #### ####',\n '+55 31 #### ####',\n '+55 41 #### ####',\n '+55 51 ### ####',\n '+55 61 #### ####',\n '+55 71 #### ####',\n '+55 81 #### ####',\n '+55 84 #### ####',\n '+55 (011) ####-####',\n '+55 (021) ####-####',\n '+55 (031) ####-####',\n '+55 (041) ####-####',\n '+55 (051) ####-####',\n '+55 (061) ####-####',\n '+55 (071) ####-####',\n '+55 (081) ####-####',\n '+55 (084) ####-####',\n '+55 11 ####-####',\n '+55 21 ####-####',\n '+55 31 ####-####',\n '+55 41 ####-####',\n '+55 51 ### ####',\n '+55 61 ####-####',\n '+55 71 ####-####',\n '+55 81 ####-####',\n '+55 84 ####-####',\n '(011) #### ####',\n '(021) #### ####',\n '(031) #### ####',\n '(041) #### ####',\n '(051) #### ####',\n '(061) #### ####',\n '(071) #### ####',\n '(081) #### ####',\n '(084) #### ####',\n '11 #### ####',\n '21 #### ####',\n '31 #### ####',\n '41 #### ####',\n '51 ### ####',\n '61 #### ####',\n '71 #### ####',\n '81 #### ####',\n '84 #### ####',\n '(011) ####-####',\n '(021) ####-####',\n '(031) ####-####',\n '(041) ####-####',\n '(051) ####-####',\n '(061) ####-####',\n '(071) ####-####',\n '(081) ####-####',\n '(084) ####-####',\n '11 ####-####',\n '21 ####-####',\n '31 ####-####',\n '41 ####-####',\n '51 ### ####',\n '61 ####-####',\n '71 ####-####',\n '81 ####-####',\n '84 ####-####',\n )\n\n msisdn_formats = (\n '55119########',\n '55219########',\n '55319########',\n '55419########',\n '55519########',\n '55619########',\n '55719########',\n '55819########',\n '55849########',\n )\n\n cellphone_formats = (\n '+55 ## 9#### ####',\n '+55 ## 9 #### ####',\n '+55 (0##) 9#### ####',\n '+55 (##) 9#### ####',\n '+55 (##) 9 #### ####',\n '+55 ## 9####-####',\n '+55 ## 9 ####-####',\n '+55 (0##) 9####-####',\n '+55 (##) 9####-####',\n '+55 (##) 9 ####-####',\n )\n\n def cellphone_number(self):\n pattern = self.random_element(self.cellphone_formats)\n return self.numerify(self.generator.parse(pattern))\n", "path": "faker/providers/phone_number/pt_BR/__init__.py"}]}
| 2,190 | 195 |
gh_patches_debug_23607
|
rasdani/github-patches
|
git_diff
|
vaexio__vaex-217
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pandas dependency
We now depend on Pandas:
https://github.com/vaexio/vaex/blob/255ccbc192d54c619a273de21a05f919da8ffadf/packages/vaex-core/vaex/formatting.py
Introduced in https://github.com/vaexio/vaex/pull/192
We should not depend on pandas; it is not a dependency of vaex-core and should not become one, and we might also grow too large to run on AWS Lambda.
--- END ISSUE ---
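For context, the two pandas calls in the file below are only used for string formatting; a rough sketch of a pandas-free rendering of `np.datetime64` values (essentially what the accepted patch further down does; the example value is arbitrary) is:

```python
import numpy as np

value = np.datetime64("2019-07-17T12:34:56")

# numpy can render datetime64 values itself; swap the "T" separator and handle NaT.
text = "NaT" if np.isnat(value) else " ".join(str(value).split("T"))
print(text)  # 2019-07-17 12:34:56
```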
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `packages/vaex-core/vaex/formatting.py`
Content:
```
1 import numpy as np
2 import numbers
3 import six
4 import pandas as pd
5
6
7 MAX_LENGTH = 50
8
9
10 def _format_value(value):
11 if isinstance(value, six.string_types):
12 value = str(value)
13 elif isinstance(value, bytes):
14 value = repr(value)
15 elif isinstance(value, np.ma.core.MaskedConstant):
16 value = str(value)
17 if isinstance(value, np.datetime64):
18 value = str(pd.to_datetime(value))
19 if isinstance(value, np.timedelta64):
20 value = str(pd.to_timedelta(value))
21 elif not isinstance(value, numbers.Number):
22 value = str(value)
23 if isinstance(value, float):
24 value = repr(value)
25 if isinstance(value, (str, bytes)):
26 if len(value) > MAX_LENGTH:
27 value = repr(value[:MAX_LENGTH-3])[:-1] + '...'
28 return value
29
```
--- END FILES ---
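Similarly, `np.timedelta64` values can be rendered without pandas by going through the standard library (a sketch; the example value is arbitrary and the exact output format is a design choice):

```python
import datetime
import numpy as np

value = np.timedelta64(90061, "s")  # 1 day, 1 hour, 1 minute, 1 second

if np.isnat(value):
    text = "NaT"
else:
    # Convert to a stdlib timedelta and let it format itself.
    text = str(datetime.timedelta(seconds=value / np.timedelta64(1, "s")))
print(text)  # 1 day, 1:01:01
```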
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/packages/vaex-core/vaex/formatting.py b/packages/vaex-core/vaex/formatting.py
--- a/packages/vaex-core/vaex/formatting.py
+++ b/packages/vaex-core/vaex/formatting.py
@@ -1,7 +1,7 @@
import numpy as np
import numbers
import six
-import pandas as pd
+import datetime
MAX_LENGTH = 50
@@ -15,9 +15,24 @@
elif isinstance(value, np.ma.core.MaskedConstant):
value = str(value)
if isinstance(value, np.datetime64):
- value = str(pd.to_datetime(value))
+ if np.isnat(value):
+ value = 'NaT'
+ else:
+ value = ' '.join(str(value).split('T'))
if isinstance(value, np.timedelta64):
- value = str(pd.to_timedelta(value))
+ if np.isnat(value):
+ value = 'NaT'
+ else:
+ tmp = datetime.timedelta(seconds=value / np.timedelta64(1, 's'))
+ ms = tmp.microseconds
+ s = np.mod(tmp.seconds, 60)
+ m = np.mod(tmp.seconds//60, 60)
+ h = tmp.seconds // 3600
+ d = tmp.days
+ if ms:
+ value = str('%i days %+02i:%02i:%02i.%i' % (d,h,m,s,ms))
+ else:
+ value = str('%i days %+02i:%02i:%02i' % (d,h,m,s))
elif not isinstance(value, numbers.Number):
value = str(value)
if isinstance(value, float):
|
{"golden_diff": "diff --git a/packages/vaex-core/vaex/formatting.py b/packages/vaex-core/vaex/formatting.py\n--- a/packages/vaex-core/vaex/formatting.py\n+++ b/packages/vaex-core/vaex/formatting.py\n@@ -1,7 +1,7 @@\n import numpy as np\n import numbers\n import six\n-import pandas as pd\n+import datetime\n \n \n MAX_LENGTH = 50\n@@ -15,9 +15,24 @@\n elif isinstance(value, np.ma.core.MaskedConstant):\n value = str(value)\n if isinstance(value, np.datetime64):\n- value = str(pd.to_datetime(value))\n+ if np.isnat(value):\n+ value = 'NaT'\n+ else:\n+ value = ' '.join(str(value).split('T'))\n if isinstance(value, np.timedelta64):\n- value = str(pd.to_timedelta(value))\n+ if np.isnat(value):\n+ value = 'NaT'\n+ else:\n+ tmp = datetime.timedelta(seconds=value / np.timedelta64(1, 's'))\n+ ms = tmp.microseconds\n+ s = np.mod(tmp.seconds, 60)\n+ m = np.mod(tmp.seconds//60, 60)\n+ h = tmp.seconds // 3600\n+ d = tmp.days\n+ if ms:\n+ value = str('%i days %+02i:%02i:%02i.%i' % (d,h,m,s,ms))\n+ else:\n+ value = str('%i days %+02i:%02i:%02i' % (d,h,m,s))\n elif not isinstance(value, numbers.Number):\n value = str(value)\n if isinstance(value, float):\n", "issue": "Pandas dependency\nWe now depends on Pandas:\r\nhttps://github.com/vaexio/vaex/blob/255ccbc192d54c619a273de21a05f919da8ffadf/packages/vaex-core/vaex/formatting.py\r\n\r\nIntroduced in https://github.com/vaexio/vaex/pull/192\r\n\r\nWe should not depend on pandas, it is not a dependency of vaex-core and should not become, we might also grow to large to run on AWS Lambda.\n", "before_files": [{"content": "import numpy as np\nimport numbers\nimport six\nimport pandas as pd\n\n\nMAX_LENGTH = 50\n\n\ndef _format_value(value):\n if isinstance(value, six.string_types):\n value = str(value)\n elif isinstance(value, bytes):\n value = repr(value)\n elif isinstance(value, np.ma.core.MaskedConstant):\n value = str(value)\n if isinstance(value, np.datetime64):\n value = str(pd.to_datetime(value))\n if isinstance(value, np.timedelta64):\n value = str(pd.to_timedelta(value))\n elif not isinstance(value, numbers.Number):\n value = str(value)\n if isinstance(value, float):\n value = repr(value)\n if isinstance(value, (str, bytes)):\n if len(value) > MAX_LENGTH:\n value = repr(value[:MAX_LENGTH-3])[:-1] + '...'\n return value\n", "path": "packages/vaex-core/vaex/formatting.py"}], "after_files": [{"content": "import numpy as np\nimport numbers\nimport six\nimport datetime\n\n\nMAX_LENGTH = 50\n\n\ndef _format_value(value):\n if isinstance(value, six.string_types):\n value = str(value)\n elif isinstance(value, bytes):\n value = repr(value)\n elif isinstance(value, np.ma.core.MaskedConstant):\n value = str(value)\n if isinstance(value, np.datetime64):\n if np.isnat(value):\n value = 'NaT'\n else:\n value = ' '.join(str(value).split('T'))\n if isinstance(value, np.timedelta64):\n if np.isnat(value):\n value = 'NaT'\n else:\n tmp = datetime.timedelta(seconds=value / np.timedelta64(1, 's'))\n ms = tmp.microseconds\n s = np.mod(tmp.seconds, 60)\n m = np.mod(tmp.seconds//60, 60)\n h = tmp.seconds // 3600\n d = tmp.days\n if ms:\n value = str('%i days %+02i:%02i:%02i.%i' % (d,h,m,s,ms))\n else:\n value = str('%i days %+02i:%02i:%02i' % (d,h,m,s))\n elif not isinstance(value, numbers.Number):\n value = str(value)\n if isinstance(value, float):\n value = repr(value)\n if isinstance(value, (str, bytes)):\n if len(value) > MAX_LENGTH:\n value = repr(value[:MAX_LENGTH-3])[:-1] + '...'\n return value\n", "path": "packages/vaex-core/vaex/formatting.py"}]}
| 622 | 387 |
gh_patches_debug_176
|
rasdani/github-patches
|
git_diff
|
searxng__searxng-471
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[SIMPLE THEME]: Reddit search engine breaks Simple Theme "Image" tab Style.
<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->
**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**
Powered by searxng - 1.0.0-999-e4025cd1
**How did you install SearXNG?**
SearXNG docker image with docker-compose.
**What happened?**
<!-- A clear and concise description of what the bug is. -->
If you turn on reddit search engine from settings.yml it gets enabled for several categories including "Images." However, things get a little funny with the images tab as far as the formatting goes. As you can see in the image below, the results don't encompass the entire canvas but only a portion like they do with "General" tab. I believe this might be due to reddit returning search results vs images when you're in the image tab (image 2 below). You'll see these search results if you keep scrolling down.
**How To Reproduce**
<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->
1. Make sure reddit search engine is turned on for images category in settings or globally via settings.yml.
2. Search for something and go to images tab.
3. Notice the behavior where images only take up the left-hand side of the canvas.
**Expected behavior**
<!-- A clear and concise description of what you expected to happen. -->
Images should use the entire canvas like they do when reddit search engine is turned off (image 3) and search should only include images or gifs etc.
**Screenshots & Logs**
<!-- If applicable, add screenshots, logs to help explain your problem. -->



**Alternatives**
Remove Reddit search engine from images category by default so it doesn't get enabled from settings.yml.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `searx/engines/reddit.py`
Content:
```
1 # SPDX-License-Identifier: AGPL-3.0-or-later
2 """
3 Reddit
4 """
5
6 import json
7 from datetime import datetime
8 from urllib.parse import urlencode, urljoin, urlparse
9
10 # about
11 about = {
12 "website": 'https://www.reddit.com/',
13 "wikidata_id": 'Q1136',
14 "official_api_documentation": 'https://www.reddit.com/dev/api',
15 "use_official_api": True,
16 "require_api_key": False,
17 "results": 'JSON',
18 }
19
20 # engine dependent config
21 categories = ['general', 'images', 'news', 'social media']
22 page_size = 25
23
24 # search-url
25 base_url = 'https://www.reddit.com/'
26 search_url = base_url + 'search.json?{query}'
27
28
29 # do search-request
30 def request(query, params):
31 query = urlencode({'q': query, 'limit': page_size})
32 params['url'] = search_url.format(query=query)
33
34 return params
35
36
37 # get response from search-request
38 def response(resp):
39 img_results = []
40 text_results = []
41
42 search_results = json.loads(resp.text)
43
44 # return empty array if there are no results
45 if 'data' not in search_results:
46 return []
47
48 posts = search_results.get('data', {}).get('children', [])
49
50 # process results
51 for post in posts:
52 data = post['data']
53
54 # extract post information
55 params = {
56 'url': urljoin(base_url, data['permalink']),
57 'title': data['title']
58 }
59
60 # if thumbnail field contains a valid URL, we need to change template
61 thumbnail = data['thumbnail']
62 url_info = urlparse(thumbnail)
63 # netloc & path
64 if url_info[1] != '' and url_info[2] != '':
65 params['img_src'] = data['url']
66 params['thumbnail_src'] = thumbnail
67 params['template'] = 'images.html'
68 img_results.append(params)
69 else:
70 created = datetime.fromtimestamp(data['created_utc'])
71 content = data['selftext']
72 if len(content) > 500:
73 content = content[:500] + '...'
74 params['content'] = content
75 params['publishedDate'] = created
76 text_results.append(params)
77
78 # show images first and text results second
79 return img_results + text_results
80
```
--- END FILES ---
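As a side note on why mixed results appear at all: the engine only emits an image result when a post's `thumbnail` field parses as a real URL, and otherwise falls back to a text result (a small illustration, assuming typical Reddit API thumbnail values such as "self" for self posts):

```python
from urllib.parse import urlparse

# Image posts carry a thumbnail URL; self posts carry markers such as "self" or "default".
for thumbnail in ("https://b.thumbs.redditmedia.com/abc.jpg", "self"):
    url_info = urlparse(thumbnail)
    is_image = url_info[1] != "" and url_info[2] != ""
    print(thumbnail, "->", "image result" if is_image else "text result")
```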
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/searx/engines/reddit.py b/searx/engines/reddit.py
--- a/searx/engines/reddit.py
+++ b/searx/engines/reddit.py
@@ -18,7 +18,7 @@
}
# engine dependent config
-categories = ['general', 'images', 'news', 'social media']
+categories = ['social media']
page_size = 25
# search-url
|
{"golden_diff": "diff --git a/searx/engines/reddit.py b/searx/engines/reddit.py\n--- a/searx/engines/reddit.py\n+++ b/searx/engines/reddit.py\n@@ -18,7 +18,7 @@\n }\n \n # engine dependent config\n-categories = ['general', 'images', 'news', 'social media']\n+categories = ['social media']\n page_size = 25\n \n # search-url\n", "issue": "[SIMPLE THEME]: Reddit search engine breaks Simple Theme \"Image\" tab Style.\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\nPowered by searxng - 1.0.0-999-e4025cd1\r\n\r\n**How did you install SearXNG?**\r\nSearXNG docker image with docker-compose.\r\n\r\n**What happened?**\r\n<!-- A clear and concise description of what the bug is. -->\r\nIf you turn on reddit search engine from settings.yml it gets enabled for several categories including \"Images.\" However, things get a little funny with the images tab as far as the formatting goes. As you can see in the image below, the results don't encompass the entire canvas but only a portion like they do with \"General\" tab. I believe this might be due to reddit returning search results vs images when you're in the image tab (image 2 below). You'll see these search results if you keep scrolling down.\r\n\r\n**How To Reproduce**\r\n<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->\r\n1. Make sure reddit search engine is turned on for images category in settings or globally via settings.yml.\r\n2. Search for something and go to images tab.\r\n3. Notice the behavior where images only take up the left-hand side of the canvas.\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nImages should use the entire canvas like they do when reddit search engine is turned off (image 3) and search should only include images or gifs etc.\r\n\r\n**Screenshots & Logs**\r\n<!-- If applicable, add screenshots, logs to help explain your problem. -->\r\n\r\n\r\n\r\n\r\n**Alternatives**\r\nRemove Reddit search engine from images category by default so it doesn't get enabled from settings.yml.\n[SIMPLE THEME]: Reddit search engine breaks Simple Theme \"Image\" tab Style.\n<!-- PLEASE FILL THESE FIELDS, IT REALLY HELPS THE MAINTAINERS OF SearXNG -->\r\n\r\n**Version of SearXNG, commit number if you are using on master branch and stipulate if you forked SearXNG**\r\nPowered by searxng - 1.0.0-999-e4025cd1\r\n\r\n**How did you install SearXNG?**\r\nSearXNG docker image with docker-compose.\r\n\r\n**What happened?**\r\n<!-- A clear and concise description of what the bug is. -->\r\nIf you turn on reddit search engine from settings.yml it gets enabled for several categories including \"Images.\" However, things get a little funny with the images tab as far as the formatting goes. As you can see in the image below, the results don't encompass the entire canvas but only a portion like they do with \"General\" tab. I believe this might be due to reddit returning search results vs images when you're in the image tab (image 2 below). You'll see these search results if you keep scrolling down.\r\n\r\n**How To Reproduce**\r\n<!-- How can we reproduce this issue? (as minimally and as precisely as possible) -->\r\n1. Make sure reddit search engine is turned on for images category in settings or globally via settings.yml.\r\n2. Search for something and go to images tab.\r\n3. 
Notice the behavior where images only take up the left-hand side of the canvas.\r\n\r\n**Expected behavior**\r\n<!-- A clear and concise description of what you expected to happen. -->\r\nImages should use the entire canvas like they do when reddit search engine is turned off (image 3) and search should only include images or gifs etc.\r\n\r\n**Screenshots & Logs**\r\n<!-- If applicable, add screenshots, logs to help explain your problem. -->\r\n\r\n\r\n\r\n\r\n**Alternatives**\r\nRemove Reddit search engine from images category by default so it doesn't get enabled from settings.yml.\n", "before_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"\n Reddit\n\"\"\"\n\nimport json\nfrom datetime import datetime\nfrom urllib.parse import urlencode, urljoin, urlparse\n\n# about\nabout = {\n \"website\": 'https://www.reddit.com/',\n \"wikidata_id\": 'Q1136',\n \"official_api_documentation\": 'https://www.reddit.com/dev/api',\n \"use_official_api\": True,\n \"require_api_key\": False,\n \"results\": 'JSON',\n}\n\n# engine dependent config\ncategories = ['general', 'images', 'news', 'social media']\npage_size = 25\n\n# search-url\nbase_url = 'https://www.reddit.com/'\nsearch_url = base_url + 'search.json?{query}'\n\n\n# do search-request\ndef request(query, params):\n query = urlencode({'q': query, 'limit': page_size})\n params['url'] = search_url.format(query=query)\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n img_results = []\n text_results = []\n\n search_results = json.loads(resp.text)\n\n # return empty array if there are no results\n if 'data' not in search_results:\n return []\n\n posts = search_results.get('data', {}).get('children', [])\n\n # process results\n for post in posts:\n data = post['data']\n\n # extract post information\n params = {\n 'url': urljoin(base_url, data['permalink']),\n 'title': data['title']\n }\n\n # if thumbnail field contains a valid URL, we need to change template\n thumbnail = data['thumbnail']\n url_info = urlparse(thumbnail)\n # netloc & path\n if url_info[1] != '' and url_info[2] != '':\n params['img_src'] = data['url']\n params['thumbnail_src'] = thumbnail\n params['template'] = 'images.html'\n img_results.append(params)\n else:\n created = datetime.fromtimestamp(data['created_utc'])\n content = data['selftext']\n if len(content) > 500:\n content = content[:500] + '...'\n params['content'] = content\n params['publishedDate'] = created\n text_results.append(params)\n\n # show images first and text results second\n return img_results + text_results\n", "path": "searx/engines/reddit.py"}], "after_files": [{"content": "# SPDX-License-Identifier: AGPL-3.0-or-later\n\"\"\"\n Reddit\n\"\"\"\n\nimport json\nfrom datetime import datetime\nfrom urllib.parse import urlencode, urljoin, urlparse\n\n# about\nabout = {\n \"website\": 'https://www.reddit.com/',\n \"wikidata_id\": 'Q1136',\n \"official_api_documentation\": 'https://www.reddit.com/dev/api',\n \"use_official_api\": True,\n \"require_api_key\": False,\n \"results\": 'JSON',\n}\n\n# engine dependent config\ncategories = ['social media']\npage_size = 25\n\n# search-url\nbase_url = 'https://www.reddit.com/'\nsearch_url = base_url + 'search.json?{query}'\n\n\n# do search-request\ndef request(query, params):\n query = urlencode({'q': query, 'limit': page_size})\n params['url'] = search_url.format(query=query)\n\n return params\n\n\n# get response from search-request\ndef response(resp):\n img_results = []\n text_results = []\n\n search_results = 
json.loads(resp.text)\n\n # return empty array if there are no results\n if 'data' not in search_results:\n return []\n\n posts = search_results.get('data', {}).get('children', [])\n\n # process results\n for post in posts:\n data = post['data']\n\n # extract post information\n params = {\n 'url': urljoin(base_url, data['permalink']),\n 'title': data['title']\n }\n\n # if thumbnail field contains a valid URL, we need to change template\n thumbnail = data['thumbnail']\n url_info = urlparse(thumbnail)\n # netloc & path\n if url_info[1] != '' and url_info[2] != '':\n params['img_src'] = data['url']\n params['thumbnail_src'] = thumbnail\n params['template'] = 'images.html'\n img_results.append(params)\n else:\n created = datetime.fromtimestamp(data['created_utc'])\n content = data['selftext']\n if len(content) > 500:\n content = content[:500] + '...'\n params['content'] = content\n params['publishedDate'] = created\n text_results.append(params)\n\n # show images first and text results second\n return img_results + text_results\n", "path": "searx/engines/reddit.py"}]}
| 2,109 | 100 |
gh_patches_debug_23234 | rasdani/github-patches | git_diff | streamlink__streamlink-1394 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
July 17 Douyu.com error
0.7.0
streamlink https://www.douyu.com/17732 source -o "PATH & FILENAME"
[cli][info] Found matching plugin douyutv for URL https://www.douyu.com/17732
error: Unable to open URL: https://www.douyu.com/lapi/live/getPlay/17732 (500 Server Error: Internal Server Error for url: https://www.douyu.com/lapi/live/getPlay/17732)
@fozzysec @steven7851
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/streamlink/plugins/douyutv.py`
Content:
```
1 import re
2 import time
3 import hashlib
4
5 from requests.adapters import HTTPAdapter
6
7 from streamlink.plugin import Plugin
8 from streamlink.plugin.api import http, validate, useragents
9 from streamlink.stream import HTTPStream, HLSStream, RTMPStream
10
11 API_URL = "https://capi.douyucdn.cn/api/v1/{0}&auth={1}"
12 VAPI_URL = "https://vmobile.douyu.com/video/getInfo?vid={0}"
13 API_SECRET = "Y237pxTx2In5ayGz"
14 SHOW_STATUS_ONLINE = 1
15 SHOW_STATUS_OFFLINE = 2
16 STREAM_WEIGHTS = {
17 "low": 540,
18 "medium": 720,
19 "source": 1080
20 }
21
22 _url_re = re.compile(r"""
23 http(s)?://
24 (?:
25 (?P<subdomain>.+)
26 \.
27 )?
28 douyu.com/
29 (?:
30 show/(?P<vid>[^/&?]+)|
31 (?P<channel>[^/&?]+)
32 )
33 """, re.VERBOSE)
34
35 _room_id_re = re.compile(r'"room_id\\*"\s*:\s*(\d+),')
36 _room_id_alt_re = re.compile(r'data-onlineid=(\d+)')
37
38 _room_id_schema = validate.Schema(
39 validate.all(
40 validate.transform(_room_id_re.search),
41 validate.any(
42 None,
43 validate.all(
44 validate.get(1),
45 validate.transform(int)
46 )
47 )
48 )
49 )
50
51 _room_id_alt_schema = validate.Schema(
52 validate.all(
53 validate.transform(_room_id_alt_re.search),
54 validate.any(
55 None,
56 validate.all(
57 validate.get(1),
58 validate.transform(int)
59 )
60 )
61 )
62 )
63
64 _room_schema = validate.Schema(
65 {
66 "data": validate.any(None, {
67 "show_status": validate.all(
68 validate.text,
69 validate.transform(int)
70 ),
71 "rtmp_url": validate.text,
72 "rtmp_live": validate.text,
73 "hls_url": validate.text,
74 "rtmp_multi_bitrate": validate.all(
75 validate.any([], {
76 validate.text: validate.text
77 }),
78 validate.transform(dict)
79 )
80 })
81 },
82 validate.get("data")
83 )
84
85 _vapi_schema = validate.Schema(
86 {
87 "data": validate.any(None, {
88 "video_url": validate.text
89 })
90 },
91 validate.get("data")
92 )
93
94
95 class Douyutv(Plugin):
96 @classmethod
97 def can_handle_url(cls, url):
98 return _url_re.match(url)
99
100 @classmethod
101 def stream_weight(cls, stream):
102 if stream in STREAM_WEIGHTS:
103 return STREAM_WEIGHTS[stream], "douyutv"
104 return Plugin.stream_weight(stream)
105
106 def _get_streams(self):
107 match = _url_re.match(self.url)
108 subdomain = match.group("subdomain")
109
110 http.verify = False
111 http.mount('https://', HTTPAdapter(max_retries=99))
112
113 if subdomain == 'v':
114 vid = match.group("vid")
115 headers = {
116 "User-Agent": useragents.ANDROID,
117 "X-Requested-With": "XMLHttpRequest"
118 }
119 res = http.get(VAPI_URL.format(vid), headers=headers)
120 room = http.json(res, schema=_vapi_schema)
121 yield "source", HLSStream(self.session, room["video_url"])
122 return
123
124 channel = match.group("channel")
125 try:
126 channel = int(channel)
127 except ValueError:
128 channel = http.get(self.url, schema=_room_id_schema)
129 if channel is None:
130 channel = http.get(self.url, schema=_room_id_alt_schema)
131
132 http.headers.update({'User-Agent': useragents.ANDROID})
133 cdns = ["ws", "tct", "ws2", "dl"]
134 ts = int(time.time())
135 suffix = "room/{0}?aid=androidhd1&cdn={1}&client_sys=android&time={2}".format(channel, cdns[0], ts)
136 sign = hashlib.md5((suffix + API_SECRET).encode()).hexdigest()
137
138 res = http.get(API_URL.format(suffix, sign))
139 room = http.json(res, schema=_room_schema)
140 if not room:
141 self.logger.info("Not a valid room url.")
142 return
143
144 if room["show_status"] != SHOW_STATUS_ONLINE:
145 self.logger.info("Stream currently unavailable.")
146 return
147
148 url = room["hls_url"]
149 yield "source", HLSStream(self.session, url)
150
151 url = "{room[rtmp_url]}/{room[rtmp_live]}".format(room=room)
152 if 'rtmp:' in url:
153 stream = RTMPStream(self.session, {
154 "rtmp": url,
155 "live": True
156 })
157 yield "source", stream
158 else:
159 yield "source", HTTPStream(self.session, url)
160
161 multi_streams = {
162 "middle": "low",
163 "middle2": "medium"
164 }
165 for name, url in room["rtmp_multi_bitrate"].items():
166 url = "{room[rtmp_url]}/{url}".format(room=room, url=url)
167 name = multi_streams[name]
168 if 'rtmp:' in url:
169 stream = RTMPStream(self.session, {
170 "rtmp": url,
171 "live": True
172 })
173 yield name, stream
174 else:
175 yield name, HTTPStream(self.session, url)
176
177
178 __plugin__ = Douyutv
179
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/streamlink/plugins/douyutv.py b/src/streamlink/plugins/douyutv.py
--- a/src/streamlink/plugins/douyutv.py
+++ b/src/streamlink/plugins/douyutv.py
@@ -10,7 +10,7 @@
API_URL = "https://capi.douyucdn.cn/api/v1/{0}&auth={1}"
VAPI_URL = "https://vmobile.douyu.com/video/getInfo?vid={0}"
-API_SECRET = "Y237pxTx2In5ayGz"
+API_SECRET = "zNzMV1y4EMxOHS6I5WKm"
SHOW_STATUS_ONLINE = 1
SHOW_STATUS_OFFLINE = 2
STREAM_WEIGHTS = {
@@ -129,10 +129,10 @@
if channel is None:
channel = http.get(self.url, schema=_room_id_alt_schema)
- http.headers.update({'User-Agent': useragents.ANDROID})
+ http.headers.update({'User-Agent': useragents.WINDOWS_PHONE_8})
cdns = ["ws", "tct", "ws2", "dl"]
ts = int(time.time())
- suffix = "room/{0}?aid=androidhd1&cdn={1}&client_sys=android&time={2}".format(channel, cdns[0], ts)
+ suffix = "room/{0}?aid=wp&cdn={1}&client_sys=wp&time={2}".format(channel, cdns[0], ts)
sign = hashlib.md5((suffix + API_SECRET).encode()).hexdigest()
res = http.get(API_URL.format(suffix, sign))
|
{"golden_diff": "diff --git a/src/streamlink/plugins/douyutv.py b/src/streamlink/plugins/douyutv.py\n--- a/src/streamlink/plugins/douyutv.py\n+++ b/src/streamlink/plugins/douyutv.py\n@@ -10,7 +10,7 @@\n \n API_URL = \"https://capi.douyucdn.cn/api/v1/{0}&auth={1}\"\n VAPI_URL = \"https://vmobile.douyu.com/video/getInfo?vid={0}\"\n-API_SECRET = \"Y237pxTx2In5ayGz\"\n+API_SECRET = \"zNzMV1y4EMxOHS6I5WKm\"\n SHOW_STATUS_ONLINE = 1\n SHOW_STATUS_OFFLINE = 2\n STREAM_WEIGHTS = {\n@@ -129,10 +129,10 @@\n if channel is None:\n channel = http.get(self.url, schema=_room_id_alt_schema)\n \n- http.headers.update({'User-Agent': useragents.ANDROID})\n+ http.headers.update({'User-Agent': useragents.WINDOWS_PHONE_8})\n cdns = [\"ws\", \"tct\", \"ws2\", \"dl\"]\n ts = int(time.time())\n- suffix = \"room/{0}?aid=androidhd1&cdn={1}&client_sys=android&time={2}\".format(channel, cdns[0], ts)\n+ suffix = \"room/{0}?aid=wp&cdn={1}&client_sys=wp&time={2}\".format(channel, cdns[0], ts)\n sign = hashlib.md5((suffix + API_SECRET).encode()).hexdigest()\n \n res = http.get(API_URL.format(suffix, sign))\n", "issue": "July 17 Douyu.com error\n0.7.0\r\nstreamlink https://www.douyu.com/17732 source -o \"PATH & FILENAME\"\r\n[cli][info] Found matching plugin douyutv for URL https://www.douyu.com/17732\r\nerror: Unable to open URL: https://www.douyu.com/lapi/live/getPlay/17732 (500 Server Error: Internal Server Error for url: https://www.douyu.com/lapi/live/getPlay/17732)\r\n@fozzysec @steven7851\n", "before_files": [{"content": "import re\nimport time\nimport hashlib\n\nfrom requests.adapters import HTTPAdapter\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, validate, useragents\nfrom streamlink.stream import HTTPStream, HLSStream, RTMPStream\n\nAPI_URL = \"https://capi.douyucdn.cn/api/v1/{0}&auth={1}\"\nVAPI_URL = \"https://vmobile.douyu.com/video/getInfo?vid={0}\"\nAPI_SECRET = \"Y237pxTx2In5ayGz\"\nSHOW_STATUS_ONLINE = 1\nSHOW_STATUS_OFFLINE = 2\nSTREAM_WEIGHTS = {\n \"low\": 540,\n \"medium\": 720,\n \"source\": 1080\n }\n\n_url_re = re.compile(r\"\"\"\n http(s)?://\n (?:\n (?P<subdomain>.+)\n \\.\n )?\n douyu.com/\n (?:\n show/(?P<vid>[^/&?]+)|\n (?P<channel>[^/&?]+)\n )\n\"\"\", re.VERBOSE)\n\n_room_id_re = re.compile(r'\"room_id\\\\*\"\\s*:\\s*(\\d+),')\n_room_id_alt_re = re.compile(r'data-onlineid=(\\d+)')\n\n_room_id_schema = validate.Schema(\n validate.all(\n validate.transform(_room_id_re.search),\n validate.any(\n None,\n validate.all(\n validate.get(1),\n validate.transform(int)\n )\n )\n )\n)\n\n_room_id_alt_schema = validate.Schema(\n validate.all(\n validate.transform(_room_id_alt_re.search),\n validate.any(\n None,\n validate.all(\n validate.get(1),\n validate.transform(int)\n )\n )\n )\n)\n\n_room_schema = validate.Schema(\n {\n \"data\": validate.any(None, {\n \"show_status\": validate.all(\n validate.text,\n validate.transform(int)\n ),\n \"rtmp_url\": validate.text,\n \"rtmp_live\": validate.text,\n \"hls_url\": validate.text,\n \"rtmp_multi_bitrate\": validate.all(\n validate.any([], {\n validate.text: validate.text\n }),\n validate.transform(dict)\n )\n })\n },\n validate.get(\"data\")\n)\n\n_vapi_schema = validate.Schema(\n {\n \"data\": validate.any(None, {\n \"video_url\": validate.text\n })\n },\n validate.get(\"data\")\n)\n\n\nclass Douyutv(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n @classmethod\n def stream_weight(cls, stream):\n if stream in STREAM_WEIGHTS:\n return STREAM_WEIGHTS[stream], \"douyutv\"\n return 
Plugin.stream_weight(stream)\n\n def _get_streams(self):\n match = _url_re.match(self.url)\n subdomain = match.group(\"subdomain\")\n\n http.verify = False\n http.mount('https://', HTTPAdapter(max_retries=99))\n\n if subdomain == 'v':\n vid = match.group(\"vid\")\n headers = {\n \"User-Agent\": useragents.ANDROID,\n \"X-Requested-With\": \"XMLHttpRequest\"\n }\n res = http.get(VAPI_URL.format(vid), headers=headers)\n room = http.json(res, schema=_vapi_schema)\n yield \"source\", HLSStream(self.session, room[\"video_url\"])\n return\n\n channel = match.group(\"channel\")\n try:\n channel = int(channel)\n except ValueError:\n channel = http.get(self.url, schema=_room_id_schema)\n if channel is None:\n channel = http.get(self.url, schema=_room_id_alt_schema)\n\n http.headers.update({'User-Agent': useragents.ANDROID})\n cdns = [\"ws\", \"tct\", \"ws2\", \"dl\"]\n ts = int(time.time())\n suffix = \"room/{0}?aid=androidhd1&cdn={1}&client_sys=android&time={2}\".format(channel, cdns[0], ts)\n sign = hashlib.md5((suffix + API_SECRET).encode()).hexdigest()\n\n res = http.get(API_URL.format(suffix, sign))\n room = http.json(res, schema=_room_schema)\n if not room:\n self.logger.info(\"Not a valid room url.\")\n return\n\n if room[\"show_status\"] != SHOW_STATUS_ONLINE:\n self.logger.info(\"Stream currently unavailable.\")\n return\n\n url = room[\"hls_url\"]\n yield \"source\", HLSStream(self.session, url)\n\n url = \"{room[rtmp_url]}/{room[rtmp_live]}\".format(room=room)\n if 'rtmp:' in url:\n stream = RTMPStream(self.session, {\n \"rtmp\": url,\n \"live\": True\n })\n yield \"source\", stream\n else:\n yield \"source\", HTTPStream(self.session, url)\n\n multi_streams = {\n \"middle\": \"low\",\n \"middle2\": \"medium\"\n }\n for name, url in room[\"rtmp_multi_bitrate\"].items():\n url = \"{room[rtmp_url]}/{url}\".format(room=room, url=url)\n name = multi_streams[name]\n if 'rtmp:' in url:\n stream = RTMPStream(self.session, {\n \"rtmp\": url,\n \"live\": True\n })\n yield name, stream\n else:\n yield name, HTTPStream(self.session, url)\n\n\n__plugin__ = Douyutv\n", "path": "src/streamlink/plugins/douyutv.py"}], "after_files": [{"content": "import re\nimport time\nimport hashlib\n\nfrom requests.adapters import HTTPAdapter\n\nfrom streamlink.plugin import Plugin\nfrom streamlink.plugin.api import http, validate, useragents\nfrom streamlink.stream import HTTPStream, HLSStream, RTMPStream\n\nAPI_URL = \"https://capi.douyucdn.cn/api/v1/{0}&auth={1}\"\nVAPI_URL = \"https://vmobile.douyu.com/video/getInfo?vid={0}\"\nAPI_SECRET = \"zNzMV1y4EMxOHS6I5WKm\"\nSHOW_STATUS_ONLINE = 1\nSHOW_STATUS_OFFLINE = 2\nSTREAM_WEIGHTS = {\n \"low\": 540,\n \"medium\": 720,\n \"source\": 1080\n }\n\n_url_re = re.compile(r\"\"\"\n http(s)?://\n (?:\n (?P<subdomain>.+)\n \\.\n )?\n douyu.com/\n (?:\n show/(?P<vid>[^/&?]+)|\n (?P<channel>[^/&?]+)\n )\n\"\"\", re.VERBOSE)\n\n_room_id_re = re.compile(r'\"room_id\\\\*\"\\s*:\\s*(\\d+),')\n_room_id_alt_re = re.compile(r'data-onlineid=(\\d+)')\n\n_room_id_schema = validate.Schema(\n validate.all(\n validate.transform(_room_id_re.search),\n validate.any(\n None,\n validate.all(\n validate.get(1),\n validate.transform(int)\n )\n )\n )\n)\n\n_room_id_alt_schema = validate.Schema(\n validate.all(\n validate.transform(_room_id_alt_re.search),\n validate.any(\n None,\n validate.all(\n validate.get(1),\n validate.transform(int)\n )\n )\n )\n)\n\n_room_schema = validate.Schema(\n {\n \"data\": validate.any(None, {\n \"show_status\": validate.all(\n validate.text,\n 
validate.transform(int)\n ),\n \"rtmp_url\": validate.text,\n \"rtmp_live\": validate.text,\n \"hls_url\": validate.text,\n \"rtmp_multi_bitrate\": validate.all(\n validate.any([], {\n validate.text: validate.text\n }),\n validate.transform(dict)\n )\n })\n },\n validate.get(\"data\")\n)\n\n_vapi_schema = validate.Schema(\n {\n \"data\": validate.any(None, {\n \"video_url\": validate.text\n })\n },\n validate.get(\"data\")\n)\n\n\nclass Douyutv(Plugin):\n @classmethod\n def can_handle_url(cls, url):\n return _url_re.match(url)\n\n @classmethod\n def stream_weight(cls, stream):\n if stream in STREAM_WEIGHTS:\n return STREAM_WEIGHTS[stream], \"douyutv\"\n return Plugin.stream_weight(stream)\n\n def _get_streams(self):\n match = _url_re.match(self.url)\n subdomain = match.group(\"subdomain\")\n\n http.verify = False\n http.mount('https://', HTTPAdapter(max_retries=99))\n\n if subdomain == 'v':\n vid = match.group(\"vid\")\n headers = {\n \"User-Agent\": useragents.ANDROID,\n \"X-Requested-With\": \"XMLHttpRequest\"\n }\n res = http.get(VAPI_URL.format(vid), headers=headers)\n room = http.json(res, schema=_vapi_schema)\n yield \"source\", HLSStream(self.session, room[\"video_url\"])\n return\n\n channel = match.group(\"channel\")\n try:\n channel = int(channel)\n except ValueError:\n channel = http.get(self.url, schema=_room_id_schema)\n if channel is None:\n channel = http.get(self.url, schema=_room_id_alt_schema)\n\n http.headers.update({'User-Agent': useragents.WINDOWS_PHONE_8})\n cdns = [\"ws\", \"tct\", \"ws2\", \"dl\"]\n ts = int(time.time())\n suffix = \"room/{0}?aid=wp&cdn={1}&client_sys=wp&time={2}\".format(channel, cdns[0], ts)\n sign = hashlib.md5((suffix + API_SECRET).encode()).hexdigest()\n\n res = http.get(API_URL.format(suffix, sign))\n room = http.json(res, schema=_room_schema)\n if not room:\n self.logger.info(\"Not a valid room url.\")\n return\n\n if room[\"show_status\"] != SHOW_STATUS_ONLINE:\n self.logger.info(\"Stream currently unavailable.\")\n return\n\n url = room[\"hls_url\"]\n yield \"source\", HLSStream(self.session, url)\n\n url = \"{room[rtmp_url]}/{room[rtmp_live]}\".format(room=room)\n if 'rtmp:' in url:\n stream = RTMPStream(self.session, {\n \"rtmp\": url,\n \"live\": True\n })\n yield \"source\", stream\n else:\n yield \"source\", HTTPStream(self.session, url)\n\n multi_streams = {\n \"middle\": \"low\",\n \"middle2\": \"medium\"\n }\n for name, url in room[\"rtmp_multi_bitrate\"].items():\n url = \"{room[rtmp_url]}/{url}\".format(room=room, url=url)\n name = multi_streams[name]\n if 'rtmp:' in url:\n stream = RTMPStream(self.session, {\n \"rtmp\": url,\n \"live\": True\n })\n yield name, stream\n else:\n yield name, HTTPStream(self.session, url)\n\n\n__plugin__ = Douyutv\n", "path": "src/streamlink/plugins/douyutv.py"}]}
| 2,033 | 369 |
gh_patches_debug_8623 | rasdani/github-patches | git_diff | archinstall__archinstall-262 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Awesome profile installation failed with no such file or directory on xdg-mime


Resolve #261 and related issues
Closes #262.
🚨 PR Guidelines:
# New features *(v2.2.0)*
Merge new features in to `torxed-v2.2.0`.<br>
This branch is designated for potential breaking changes, added complexity and new functionality.
# Bug fixes *(v2.1.4)*
Merge against `master` for bug fixes and anything that improves stability and quality of life.<br>
This excludes:
* New functionality
* Added complexity
* Breaking changes
Any changes to `master` automatically gets pulled in to `torxed-v2.2.0` to avoid merge hell.
# Describe your PR
If the changes has been discussed in an Issue, please tag it so we can backtrace from the Issue later on.<br>
If the PR is larger than ~20 lines, please describe it here unless described in an issue.
# Testing
Any new feature or stability improvement should be tested if possible.
Please follow the test instructions at the bottom of the README.
*These PR guidelines will change after 2021-05-01, which is when `v2.1.4` gets onto the new ISO*
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `profiles/desktop.py`
Content:
```
1 # A desktop environment selector.
2
3 import archinstall, os
4
5 is_top_level_profile = True
6
7 # New way of defining packages for a profile, which is iterable and can be used out side
8 # of the profile to get a list of "what packages will be installed".
9 __packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']
10
11 def _prep_function(*args, **kwargs):
12 """
13 Magic function called by the importing installer
14 before continuing any further. It also avoids executing any
15 other code in this stage. So it's a safe way to ask the user
16 for more input before any other installer steps start.
17 """
18
19 supported_desktops = ['gnome', 'kde', 'awesome', 'sway', 'cinnamon', 'xfce4', 'lxqt', 'i3', 'budgie']
20 desktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')
21
22 # Temporarily store the selected desktop profile
23 # in a session-safe location, since this module will get reloaded
24 # the next time it gets executed.
25 archinstall.storage['_desktop_profile'] = desktop
26
27 profile = archinstall.Profile(None, desktop)
28 # Loading the instructions with a custom namespace, ensures that a __name__ comparison is never triggered.
29 with profile.load_instructions(namespace=f"{desktop}.py") as imported:
30 if hasattr(imported, '_prep_function'):
31 return imported._prep_function()
32 else:
33 print(f"Deprecated (??): {desktop} profile has no _prep_function() anymore")
34
35 if __name__ == 'desktop':
36 """
37 This "profile" is a meta-profile.
38 There are no desktop-specific steps, it simply routes
39 the installer to whichever desktop environment/window manager was chosen.
40
41 Maybe in the future, a network manager or similar things *could* be added here.
42 We should honor that Arch Linux does not officially endorse a desktop-setup, nor is
43 it trying to be a turn-key desktop distribution.
44
45 There are plenty of desktop-turn-key-solutions based on Arch Linux,
46 this is therefore just a helper to get started
47 """
48
49 # Install common packages for all desktop environments
50 installation.add_additional_packages(__packages__)
51
52 # TODO: Remove magic variable 'installation' and place it
53 # in archinstall.storage or archinstall.session/archinstall.installation
54 installation.install_profile(archinstall.storage['_desktop_profile'])
55
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/profiles/desktop.py b/profiles/desktop.py
--- a/profiles/desktop.py
+++ b/profiles/desktop.py
@@ -6,7 +6,7 @@
# New way of defining packages for a profile, which is iterable and can be used out side
# of the profile to get a list of "what packages will be installed".
-__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']
+__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools', 'xdg-utils']
def _prep_function(*args, **kwargs):
"""
|
{"golden_diff": "diff --git a/profiles/desktop.py b/profiles/desktop.py\n--- a/profiles/desktop.py\n+++ b/profiles/desktop.py\n@@ -6,7 +6,7 @@\n \n # New way of defining packages for a profile, which is iterable and can be used out side\n # of the profile to get a list of \"what packages will be installed\".\n-__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']\n+__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools', 'xdg-utils']\n \n def _prep_function(*args, **kwargs):\n \t\"\"\"\n", "issue": "Awesome profile installation failed with no such file or directory on xdg-mime\n\r\n\r\n\nResolve #261 and related issues\nCloses #262.\r\n\r\n\ud83d\udea8 PR Guidelines:\r\n\r\n# New features *(v2.2.0)*\r\n\r\nMerge new features in to `torxed-v2.2.0`.<br>\r\nThis branch is designated for potential breaking changes, added complexity and new functionality.\r\n\r\n# Bug fixes *(v2.1.4)*\r\n\r\nMerge against `master` for bug fixes and anything that improves stability and quality of life.<br>\r\nThis excludes:\r\n * New functionality\r\n * Added complexity\r\n * Breaking changes\r\n\r\nAny changes to `master` automatically gets pulled in to `torxed-v2.2.0` to avoid merge hell.\r\n\r\n# Describe your PR\r\n\r\nIf the changes has been discussed in an Issue, please tag it so we can backtrace from the Issue later on.<br>\r\nIf the PR is larger than ~20 lines, please describe it here unless described in an issue.\r\n\r\n# Testing\r\n\r\nAny new feature or stability improvement should be tested if possible.\r\nPlease follow the test instructions at the bottom of the README.\r\n\r\n*These PR guidelines will change after 2021-05-01, which is when `v2.1.4` gets onto the new ISO*\r\n\n", "before_files": [{"content": "# A desktop environment selector.\n\nimport archinstall, os\n\nis_top_level_profile = True\n\n# New way of defining packages for a profile, which is iterable and can be used out side\n# of the profile to get a list of \"what packages will be installed\".\n__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools']\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\tsupported_desktops = ['gnome', 'kde', 'awesome', 'sway', 'cinnamon', 'xfce4', 'lxqt', 'i3', 'budgie']\n\tdesktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')\n\t\n\t# Temporarily store the selected desktop profile\n\t# in a session-safe location, since this module will get reloaded\n\t# the next time it gets executed.\n\tarchinstall.storage['_desktop_profile'] = desktop\n\n\tprofile = archinstall.Profile(None, desktop)\n\t# Loading the instructions with a custom namespace, ensures that a __name__ comparison is never triggered.\n\twith profile.load_instructions(namespace=f\"{desktop}.py\") as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint(f\"Deprecated (??): {desktop} profile has no _prep_function() anymore\")\n\nif __name__ == 'desktop':\n\t\"\"\"\n\tThis \"profile\" is a meta-profile.\n\tThere are no desktop-specific steps, it simply routes\n\tthe installer to whichever desktop environment/window manager was chosen.\n\n\tMaybe in the future, a network manager or similar things *could* be added here.\n\tWe should honor that Arch Linux does not officially endorse a desktop-setup, nor is\n\tit trying to be a turn-key desktop distribution.\n\n\tThere are plenty of desktop-turn-key-solutions based on Arch Linux,\n\tthis is therefore just a helper to get started\n\t\"\"\"\n\t\n\t# Install common packages for all desktop environments\n\tinstallation.add_additional_packages(__packages__)\n\n\t# TODO: Remove magic variable 'installation' and place it\n\t# in archinstall.storage or archinstall.session/archinstall.installation\n\tinstallation.install_profile(archinstall.storage['_desktop_profile'])\n\n", "path": "profiles/desktop.py"}], "after_files": [{"content": "# A desktop environment selector.\n\nimport archinstall, os\n\nis_top_level_profile = True\n\n# New way of defining packages for a profile, which is iterable and can be used out side\n# of the profile to get a list of \"what packages will be installed\".\n__packages__ = ['nano', 'vim', 'openssh', 'htop', 'wget', 'iwd', 'wireless_tools', 'wpa_supplicant', 'smartmontools', 'xdg-utils']\n\ndef _prep_function(*args, **kwargs):\n\t\"\"\"\n\tMagic function called by the importing installer\n\tbefore continuing any further. It also avoids executing any\n\tother code in this stage. 
So it's a safe way to ask the user\n\tfor more input before any other installer steps start.\n\t\"\"\"\n\n\tsupported_desktops = ['gnome', 'kde', 'awesome', 'sway', 'cinnamon', 'xfce4', 'lxqt', 'i3', 'budgie']\n\tdesktop = archinstall.generic_select(supported_desktops, 'Select your desired desktop environment: ')\n\t\n\t# Temporarily store the selected desktop profile\n\t# in a session-safe location, since this module will get reloaded\n\t# the next time it gets executed.\n\tarchinstall.storage['_desktop_profile'] = desktop\n\n\tprofile = archinstall.Profile(None, desktop)\n\t# Loading the instructions with a custom namespace, ensures that a __name__ comparison is never triggered.\n\twith profile.load_instructions(namespace=f\"{desktop}.py\") as imported:\n\t\tif hasattr(imported, '_prep_function'):\n\t\t\treturn imported._prep_function()\n\t\telse:\n\t\t\tprint(f\"Deprecated (??): {desktop} profile has no _prep_function() anymore\")\n\nif __name__ == 'desktop':\n\t\"\"\"\n\tThis \"profile\" is a meta-profile.\n\tThere are no desktop-specific steps, it simply routes\n\tthe installer to whichever desktop environment/window manager was chosen.\n\n\tMaybe in the future, a network manager or similar things *could* be added here.\n\tWe should honor that Arch Linux does not officially endorse a desktop-setup, nor is\n\tit trying to be a turn-key desktop distribution.\n\n\tThere are plenty of desktop-turn-key-solutions based on Arch Linux,\n\tthis is therefore just a helper to get started\n\t\"\"\"\n\t\n\t# Install common packages for all desktop environments\n\tinstallation.add_additional_packages(__packages__)\n\n\t# TODO: Remove magic variable 'installation' and place it\n\t# in archinstall.storage or archinstall.session/archinstall.installation\n\tinstallation.install_profile(archinstall.storage['_desktop_profile'])\n\n", "path": "profiles/desktop.py"}]}
| 1,367 | 178 |
gh_patches_debug_64233 | rasdani/github-patches | git_diff | optuna__optuna-1717 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
LightGBMTunerCV not working for regression objective
The script https://github.com/optuna/optuna/blob/master/examples/lightgbm_tuner_cv.py runs just fine as it is. However, I get a `KeyError: 'mse-mean'` if I change the `objective` to `regression` and `metric` to `mse`. Similar erro happens to other metrics as well when the `objective` is set to `regression`.
## Environment
- Optuna version: 2.0.0
- Python version: 3.7
- OS: MacOS Catalina
## Error messages, stack traces, or logs
```
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-11-7753103b8251> in <module>
15 )
16
---> 17 tuner.run()
18
19 print("Best score:", tuner.best_score)
/usr/local/lib/python3.7/site-packages/optuna/integration/_lightgbm_tuner/optimize.py in run(self)
461 self.sample_train_set()
462
--> 463 self.tune_feature_fraction()
464 self.tune_num_leaves()
465 self.tune_bagging()
/usr/local/lib/python3.7/site-packages/optuna/integration/_lightgbm_tuner/optimize.py in tune_feature_fraction(self, n_trials)
486
487 sampler = optuna.samplers.GridSampler({param_name: param_values})
--> 488 self._tune_params([param_name], len(param_values), sampler, "feature_fraction")
489
490 def tune_num_leaves(self, n_trials: int = 20) -> None:
/usr/local/lib/python3.7/site-packages/optuna/integration/_lightgbm_tuner/optimize.py in _tune_params(self, target_param_names, n_trials, sampler, step_name)
567 timeout=_timeout,
568 catch=(),
--> 569 callbacks=self._optuna_callbacks,
570 )
571
/usr/local/lib/python3.7/site-packages/optuna/study.py in optimize(self, func, n_trials, timeout, n_jobs, catch, callbacks, gc_after_trial, show_progress_bar)
290 if n_jobs == 1:
291 self._optimize_sequential(
--> 292 func, n_trials, timeout, catch, callbacks, gc_after_trial, None
293 )
294 else:
/usr/local/lib/python3.7/site-packages/optuna/study.py in _optimize_sequential(self, func, n_trials, timeout, catch, callbacks, gc_after_trial, time_start)
652 break
653
--> 654 self._run_trial_and_callbacks(func, catch, callbacks, gc_after_trial)
655
656 self._progress_bar.update((datetime.datetime.now() - time_start).total_seconds())
/usr/local/lib/python3.7/site-packages/optuna/study.py in _run_trial_and_callbacks(self, func, catch, callbacks, gc_after_trial)
683 # type: (...) -> None
684
--> 685 trial = self._run_trial(func, catch, gc_after_trial)
686 if callbacks is not None:
687 frozen_trial = copy.deepcopy(self._storage.get_trial(trial._trial_id))
/usr/local/lib/python3.7/site-packages/optuna/study.py in _run_trial(self, func, catch, gc_after_trial)
707
708 try:
--> 709 result = func(trial)
710 except exceptions.TrialPruned as e:
711 message = "Trial {} pruned. {}".format(trial_number, str(e))
/usr/local/lib/python3.7/site-packages/optuna/integration/_lightgbm_tuner/optimize.py in __call__(self, trial)
302 cv_results = lgb.cv(self.lgbm_params, self.train_set, **self.lgbm_kwargs)
303
--> 304 val_scores = self._get_cv_scores(cv_results)
305 val_score = val_scores[-1]
306 elapsed_secs = time.time() - start_time
/usr/local/lib/python3.7/site-packages/optuna/integration/_lightgbm_tuner/optimize.py in _get_cv_scores(self, cv_results)
292
293 metric = self._get_metric_for_objective()
--> 294 val_scores = cv_results["{}-mean".format(metric)]
295 return val_scores
296
KeyError: 'mse-mean'
```
## Steps to reproduce
1. Run [this script](https://github.com/optuna/optuna/blob/master/examples/lightgbm_tuner_cv.py) with objective = regression and metric = mse.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `optuna/integration/_lightgbm_tuner/alias.py`
Content:
```
1 from typing import Any
2 from typing import Dict
3 from typing import List # NOQA
4
5
6 _ALIAS_GROUP_LIST = [
7 {"param_name": "bagging_fraction", "alias_names": ["sub_row", "subsample", "bagging"]},
8 {"param_name": "learning_rate", "alias_names": ["shrinkage_rate", "eta"]},
9 {
10 "param_name": "min_data_in_leaf",
11 "alias_names": ["min_data_per_leaf", "min_data", "min_child_samples"],
12 },
13 {
14 "param_name": "min_sum_hessian_in_leaf",
15 "alias_names": [
16 "min_sum_hessian_per_leaf",
17 "min_sum_hessian",
18 "min_hessian",
19 "min_child_weight",
20 ],
21 },
22 {"param_name": "bagging_freq", "alias_names": ["subsample_freq"]},
23 {"param_name": "feature_fraction", "alias_names": ["sub_feature", "colsample_bytree"]},
24 {"param_name": "lambda_l1", "alias_names": ["reg_alpha"]},
25 {"param_name": "lambda_l2", "alias_names": ["reg_lambda", "lambda"]},
26 {"param_name": "min_gain_to_split", "alias_names": ["min_split_gain"]},
27 ] # type: List[Dict[str, Any]]
28
29
30 def _handling_alias_parameters(lgbm_params: Dict[str, Any]) -> None:
31 """Handling alias parameters."""
32
33 for alias_group in _ALIAS_GROUP_LIST:
34 param_name = alias_group["param_name"]
35 alias_names = alias_group["alias_names"]
36
37 for alias_name in alias_names:
38 if alias_name in lgbm_params:
39 lgbm_params[param_name] = lgbm_params[alias_name]
40 del lgbm_params[alias_name]
41
42
43 _ALIAS_METRIC_LIST = [
44 {
45 "metric_name": "ndcg",
46 "alias_names": [
47 "lambdarank",
48 "rank_xendcg",
49 "xendcg",
50 "xe_ndcg",
51 "xe_ndcg_mart",
52 "xendcg_mart",
53 ],
54 },
55 {"metric_name": "map", "alias_names": ["mean_average_precision"]},
56 ] # type: List[Dict[str, Any]]
57
58
59 def _handling_alias_metrics(lgbm_params: Dict[str, Any]) -> None:
60 """Handling alias metrics."""
61
62 if "metric" not in lgbm_params.keys():
63 return
64
65 for metric in _ALIAS_METRIC_LIST:
66 metric_name = metric["metric_name"]
67 alias_names = metric["alias_names"]
68
69 for alias_name in alias_names:
70 if lgbm_params["metric"] == alias_name:
71 lgbm_params["metric"] = metric_name
72 break
73
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/optuna/integration/_lightgbm_tuner/alias.py b/optuna/integration/_lightgbm_tuner/alias.py
--- a/optuna/integration/_lightgbm_tuner/alias.py
+++ b/optuna/integration/_lightgbm_tuner/alias.py
@@ -53,6 +53,10 @@
],
},
{"metric_name": "map", "alias_names": ["mean_average_precision"]},
+ {
+ "metric_name": "l2",
+ "alias_names": ["regression", "regression_l2", "l2", "mean_squared_error", "mse"],
+ },
] # type: List[Dict[str, Any]]
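With the new alias group, `_handling_alias_metrics` rewrites `metric='mse'` (or `'regression'`, `'mean_squared_error'`, and so on) to the canonical `'l2'` before the tuner indexes the cross-validation results, which is the name LightGBM actually reports. A condensed, self-contained sketch of that behaviour, with the alias table abbreviated to the entry the patch adds:

```python
# Abbreviated alias table: only the group added by the patch is shown.
_ALIAS_METRIC_LIST = [
    {
        "metric_name": "l2",
        "alias_names": ["regression", "regression_l2", "l2", "mean_squared_error", "mse"],
    },
]


def _handling_alias_metrics(lgbm_params):
    # Same idea as the full implementation: replace a recognised alias with
    # the canonical metric name, in place.
    if "metric" not in lgbm_params:
        return
    for metric in _ALIAS_METRIC_LIST:
        if lgbm_params["metric"] in metric["alias_names"]:
            lgbm_params["metric"] = metric["metric_name"]
            break


params = {"objective": "regression", "metric": "mse"}
_handling_alias_metrics(params)
print(params["metric"])                    # -> 'l2'
print("{}-mean".format(params["metric"]))  # -> 'l2-mean', the key present in cv_results
```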
|
{"golden_diff": "diff --git a/optuna/integration/_lightgbm_tuner/alias.py b/optuna/integration/_lightgbm_tuner/alias.py\n--- a/optuna/integration/_lightgbm_tuner/alias.py\n+++ b/optuna/integration/_lightgbm_tuner/alias.py\n@@ -53,6 +53,10 @@\n ],\n },\n {\"metric_name\": \"map\", \"alias_names\": [\"mean_average_precision\"]},\n+ {\n+ \"metric_name\": \"l2\",\n+ \"alias_names\": [\"regression\", \"regression_l2\", \"l2\", \"mean_squared_error\", \"mse\"],\n+ },\n ] # type: List[Dict[str, Any]]\n", "issue": "LightGBMTunerCV not working for regression objective\nThe script https://github.com/optuna/optuna/blob/master/examples/lightgbm_tuner_cv.py runs just fine as it is. However, I get a `KeyError: 'mse-mean'` if I change the `objective` to `regression` and `metric` to `mse`. Similar erro happens to other metrics as well when the `objective` is set to `regression`.\r\n\r\n## Environment\r\n\r\n- Optuna version: 2.0.0\r\n- Python version: 3.7\r\n- OS: MacOS Catalina\r\n\r\n## Error messages, stack traces, or logs\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nKeyError Traceback (most recent call last)\r\n<ipython-input-11-7753103b8251> in <module>\r\n 15 )\r\n 16 \r\n---> 17 tuner.run()\r\n 18 \r\n 19 print(\"Best score:\", tuner.best_score)\r\n\r\n/usr/local/lib/python3.7/site-packages/optuna/integration/_lightgbm_tuner/optimize.py in run(self)\r\n 461 self.sample_train_set()\r\n 462 \r\n--> 463 self.tune_feature_fraction()\r\n 464 self.tune_num_leaves()\r\n 465 self.tune_bagging()\r\n\r\n/usr/local/lib/python3.7/site-packages/optuna/integration/_lightgbm_tuner/optimize.py in tune_feature_fraction(self, n_trials)\r\n 486 \r\n 487 sampler = optuna.samplers.GridSampler({param_name: param_values})\r\n--> 488 self._tune_params([param_name], len(param_values), sampler, \"feature_fraction\")\r\n 489 \r\n 490 def tune_num_leaves(self, n_trials: int = 20) -> None:\r\n\r\n/usr/local/lib/python3.7/site-packages/optuna/integration/_lightgbm_tuner/optimize.py in _tune_params(self, target_param_names, n_trials, sampler, step_name)\r\n 567 timeout=_timeout,\r\n 568 catch=(),\r\n--> 569 callbacks=self._optuna_callbacks,\r\n 570 )\r\n 571 \r\n\r\n/usr/local/lib/python3.7/site-packages/optuna/study.py in optimize(self, func, n_trials, timeout, n_jobs, catch, callbacks, gc_after_trial, show_progress_bar)\r\n 290 if n_jobs == 1:\r\n 291 self._optimize_sequential(\r\n--> 292 func, n_trials, timeout, catch, callbacks, gc_after_trial, None\r\n 293 )\r\n 294 else:\r\n\r\n/usr/local/lib/python3.7/site-packages/optuna/study.py in _optimize_sequential(self, func, n_trials, timeout, catch, callbacks, gc_after_trial, time_start)\r\n 652 break\r\n 653 \r\n--> 654 self._run_trial_and_callbacks(func, catch, callbacks, gc_after_trial)\r\n 655 \r\n 656 self._progress_bar.update((datetime.datetime.now() - time_start).total_seconds())\r\n\r\n/usr/local/lib/python3.7/site-packages/optuna/study.py in _run_trial_and_callbacks(self, func, catch, callbacks, gc_after_trial)\r\n 683 # type: (...) -> None\r\n 684 \r\n--> 685 trial = self._run_trial(func, catch, gc_after_trial)\r\n 686 if callbacks is not None:\r\n 687 frozen_trial = copy.deepcopy(self._storage.get_trial(trial._trial_id))\r\n\r\n/usr/local/lib/python3.7/site-packages/optuna/study.py in _run_trial(self, func, catch, gc_after_trial)\r\n 707 \r\n 708 try:\r\n--> 709 result = func(trial)\r\n 710 except exceptions.TrialPruned as e:\r\n 711 message = \"Trial {} pruned. 
{}\".format(trial_number, str(e))\r\n\r\n/usr/local/lib/python3.7/site-packages/optuna/integration/_lightgbm_tuner/optimize.py in __call__(self, trial)\r\n 302 cv_results = lgb.cv(self.lgbm_params, self.train_set, **self.lgbm_kwargs)\r\n 303 \r\n--> 304 val_scores = self._get_cv_scores(cv_results)\r\n 305 val_score = val_scores[-1]\r\n 306 elapsed_secs = time.time() - start_time\r\n\r\n/usr/local/lib/python3.7/site-packages/optuna/integration/_lightgbm_tuner/optimize.py in _get_cv_scores(self, cv_results)\r\n 292 \r\n 293 metric = self._get_metric_for_objective()\r\n--> 294 val_scores = cv_results[\"{}-mean\".format(metric)]\r\n 295 return val_scores\r\n 296 \r\n\r\nKeyError: 'mse-mean'\r\n```\r\n\r\n## Steps to reproduce\r\n\r\n1. Run [this script](https://github.com/optuna/optuna/blob/master/examples/lightgbm_tuner_cv.py) with objective = regression and metric = mse. \r\n\n", "before_files": [{"content": "from typing import Any\nfrom typing import Dict\nfrom typing import List # NOQA\n\n\n_ALIAS_GROUP_LIST = [\n {\"param_name\": \"bagging_fraction\", \"alias_names\": [\"sub_row\", \"subsample\", \"bagging\"]},\n {\"param_name\": \"learning_rate\", \"alias_names\": [\"shrinkage_rate\", \"eta\"]},\n {\n \"param_name\": \"min_data_in_leaf\",\n \"alias_names\": [\"min_data_per_leaf\", \"min_data\", \"min_child_samples\"],\n },\n {\n \"param_name\": \"min_sum_hessian_in_leaf\",\n \"alias_names\": [\n \"min_sum_hessian_per_leaf\",\n \"min_sum_hessian\",\n \"min_hessian\",\n \"min_child_weight\",\n ],\n },\n {\"param_name\": \"bagging_freq\", \"alias_names\": [\"subsample_freq\"]},\n {\"param_name\": \"feature_fraction\", \"alias_names\": [\"sub_feature\", \"colsample_bytree\"]},\n {\"param_name\": \"lambda_l1\", \"alias_names\": [\"reg_alpha\"]},\n {\"param_name\": \"lambda_l2\", \"alias_names\": [\"reg_lambda\", \"lambda\"]},\n {\"param_name\": \"min_gain_to_split\", \"alias_names\": [\"min_split_gain\"]},\n] # type: List[Dict[str, Any]]\n\n\ndef _handling_alias_parameters(lgbm_params: Dict[str, Any]) -> None:\n \"\"\"Handling alias parameters.\"\"\"\n\n for alias_group in _ALIAS_GROUP_LIST:\n param_name = alias_group[\"param_name\"]\n alias_names = alias_group[\"alias_names\"]\n\n for alias_name in alias_names:\n if alias_name in lgbm_params:\n lgbm_params[param_name] = lgbm_params[alias_name]\n del lgbm_params[alias_name]\n\n\n_ALIAS_METRIC_LIST = [\n {\n \"metric_name\": \"ndcg\",\n \"alias_names\": [\n \"lambdarank\",\n \"rank_xendcg\",\n \"xendcg\",\n \"xe_ndcg\",\n \"xe_ndcg_mart\",\n \"xendcg_mart\",\n ],\n },\n {\"metric_name\": \"map\", \"alias_names\": [\"mean_average_precision\"]},\n] # type: List[Dict[str, Any]]\n\n\ndef _handling_alias_metrics(lgbm_params: Dict[str, Any]) -> None:\n \"\"\"Handling alias metrics.\"\"\"\n\n if \"metric\" not in lgbm_params.keys():\n return\n\n for metric in _ALIAS_METRIC_LIST:\n metric_name = metric[\"metric_name\"]\n alias_names = metric[\"alias_names\"]\n\n for alias_name in alias_names:\n if lgbm_params[\"metric\"] == alias_name:\n lgbm_params[\"metric\"] = metric_name\n break\n", "path": "optuna/integration/_lightgbm_tuner/alias.py"}], "after_files": [{"content": "from typing import Any\nfrom typing import Dict\nfrom typing import List # NOQA\n\n\n_ALIAS_GROUP_LIST = [\n {\"param_name\": \"bagging_fraction\", \"alias_names\": [\"sub_row\", \"subsample\", \"bagging\"]},\n {\"param_name\": \"learning_rate\", \"alias_names\": [\"shrinkage_rate\", \"eta\"]},\n {\n \"param_name\": \"min_data_in_leaf\",\n \"alias_names\": [\"min_data_per_leaf\", 
\"min_data\", \"min_child_samples\"],\n },\n {\n \"param_name\": \"min_sum_hessian_in_leaf\",\n \"alias_names\": [\n \"min_sum_hessian_per_leaf\",\n \"min_sum_hessian\",\n \"min_hessian\",\n \"min_child_weight\",\n ],\n },\n {\"param_name\": \"bagging_freq\", \"alias_names\": [\"subsample_freq\"]},\n {\"param_name\": \"feature_fraction\", \"alias_names\": [\"sub_feature\", \"colsample_bytree\"]},\n {\"param_name\": \"lambda_l1\", \"alias_names\": [\"reg_alpha\"]},\n {\"param_name\": \"lambda_l2\", \"alias_names\": [\"reg_lambda\", \"lambda\"]},\n {\"param_name\": \"min_gain_to_split\", \"alias_names\": [\"min_split_gain\"]},\n] # type: List[Dict[str, Any]]\n\n\ndef _handling_alias_parameters(lgbm_params: Dict[str, Any]) -> None:\n \"\"\"Handling alias parameters.\"\"\"\n\n for alias_group in _ALIAS_GROUP_LIST:\n param_name = alias_group[\"param_name\"]\n alias_names = alias_group[\"alias_names\"]\n\n for alias_name in alias_names:\n if alias_name in lgbm_params:\n lgbm_params[param_name] = lgbm_params[alias_name]\n del lgbm_params[alias_name]\n\n\n_ALIAS_METRIC_LIST = [\n {\n \"metric_name\": \"ndcg\",\n \"alias_names\": [\n \"lambdarank\",\n \"rank_xendcg\",\n \"xendcg\",\n \"xe_ndcg\",\n \"xe_ndcg_mart\",\n \"xendcg_mart\",\n ],\n },\n {\"metric_name\": \"map\", \"alias_names\": [\"mean_average_precision\"]},\n {\n \"metric_name\": \"l2\",\n \"alias_names\": [\"regression\", \"regression_l2\", \"l2\", \"mean_squared_error\", \"mse\"],\n },\n] # type: List[Dict[str, Any]]\n\n\ndef _handling_alias_metrics(lgbm_params: Dict[str, Any]) -> None:\n \"\"\"Handling alias metrics.\"\"\"\n\n if \"metric\" not in lgbm_params.keys():\n return\n\n for metric in _ALIAS_METRIC_LIST:\n metric_name = metric[\"metric_name\"]\n alias_names = metric[\"alias_names\"]\n\n for alias_name in alias_names:\n if lgbm_params[\"metric\"] == alias_name:\n lgbm_params[\"metric\"] = metric_name\n break\n", "path": "optuna/integration/_lightgbm_tuner/alias.py"}]}
| 2,152 | 155 |
gh_patches_debug_9456 | rasdani/github-patches | git_diff | pip__pip-4661 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
pip un-vendored support is broken
* Pip version: 9.0.1-465-g841f5dfb
* Python version: 2.7.13
* Operating system: Arch Linux x86_64
### What I've run:
```python
> ./.tox/py27-novendor/bin/pip search test
Traceback (most recent call last):
File "./.tox/py27-novendor/bin/pip", line 7, in <module>
from pip import main
File ".tox/py27-novendor/lib/python2.7/site-packages/pip/__init__.py", line 46, in <module>
from pip.vcs import git, mercurial, subversion, bazaar # noqa
File ".tox/py27-novendor/lib/python2.7/site-packages/pip/vcs/mercurial.py", line 8, in <module>
from pip.download import path_to_url
File ".tox/py27-novendor/lib/python2.7/site-packages/pip/download.py", line 28, in <module>
from pip._vendor.six.moves.urllib.parse import unquote as urllib_unquote
ImportError: No module named parse
```
and after fixing that one:
```python
Traceback (most recent call last):
File "./.tox/py27-novendor/bin/pip", line 7, in <module>
from pip import main
File ".tox/py27-novendor/lib/python2.7/site-packages/pip/__init__.py", line 46, in <module>
from pip.vcs import git, mercurial, subversion, bazaar # noqa
File ".tox/py27-novendor/lib/python2.7/site-packages/pip/vcs/subversion.py", line 9, in <module>
from pip.index import Link
File ".tox/py27-novendor/lib/python2.7/site-packages/pip/index.py", line 39, in <module>
from pip.wheel import Wheel, wheel_ext
File ".tox/py27-novendor/lib/python2.7/site-packages/pip/wheel.py", line 21, in <module>
from pip._vendor import pkg_resources, pytoml
ImportError: cannot import name pytoml
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_vendor/__init__.py`
Content:
```
1 """
2 pip._vendor is for vendoring dependencies of pip to prevent needing pip to
3 depend on something external.
4
5 Files inside of pip._vendor should be considered immutable and should only be
6 updated to versions from upstream.
7 """
8 from __future__ import absolute_import
9
10 import glob
11 import os.path
12 import sys
13
14 # Downstream redistributors which have debundled our dependencies should also
15 # patch this value to be true. This will trigger the additional patching
16 # to cause things like "six" to be available as pip.
17 DEBUNDLED = False
18
19 # By default, look in this directory for a bunch of .whl files which we will
20 # add to the beginning of sys.path before attempting to import anything. This
21 # is done to support downstream re-distributors like Debian and Fedora who
22 # wish to create their own Wheels for our dependencies to aid in debundling.
23 WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
24
25
26 # Define a small helper function to alias our vendored modules to the real ones
27 # if the vendored ones do not exist. This idea of this was taken from
28 # https://github.com/kennethreitz/requests/pull/2567.
29 def vendored(modulename):
30 vendored_name = "{0}.{1}".format(__name__, modulename)
31
32 try:
33 __import__(vendored_name, globals(), locals(), level=0)
34 except ImportError:
35 try:
36 __import__(modulename, globals(), locals(), level=0)
37 except ImportError:
38 # We can just silently allow import failures to pass here. If we
39 # got to this point it means that ``import pip._vendor.whatever``
40 # failed and so did ``import whatever``. Since we're importing this
41 # upfront in an attempt to alias imports, not erroring here will
42 # just mean we get a regular import error whenever pip *actually*
43 # tries to import one of these modules to use it, which actually
44 # gives us a better error message than we would have otherwise
45 # gotten.
46 pass
47 else:
48 sys.modules[vendored_name] = sys.modules[modulename]
49 base, head = vendored_name.rsplit(".", 1)
50 setattr(sys.modules[base], head, sys.modules[modulename])
51
52
53 # If we're operating in a debundled setup, then we want to go ahead and trigger
54 # the aliasing of our vendored libraries as well as looking for wheels to add
55 # to our sys.path. This will cause all of this code to be a no-op typically
56 # however downstream redistributors can enable it in a consistent way across
57 # all platforms.
58 if DEBUNDLED:
59 # Actually look inside of WHEEL_DIR to find .whl files and add them to the
60 # front of our sys.path.
61 sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path
62
63 # Actually alias all of our vendored dependencies.
64 vendored("cachecontrol")
65 vendored("colorama")
66 vendored("distlib")
67 vendored("distro")
68 vendored("html5lib")
69 vendored("lockfile")
70 vendored("six")
71 vendored("six.moves")
72 vendored("six.moves.urllib")
73 vendored("packaging")
74 vendored("packaging.version")
75 vendored("packaging.specifiers")
76 vendored("pkg_resources")
77 vendored("progress")
78 vendored("retrying")
79 vendored("requests")
80 vendored("requests.packages")
81 vendored("requests.packages.urllib3")
82 vendored("requests.packages.urllib3._collections")
83 vendored("requests.packages.urllib3.connection")
84 vendored("requests.packages.urllib3.connectionpool")
85 vendored("requests.packages.urllib3.contrib")
86 vendored("requests.packages.urllib3.contrib.ntlmpool")
87 vendored("requests.packages.urllib3.contrib.pyopenssl")
88 vendored("requests.packages.urllib3.exceptions")
89 vendored("requests.packages.urllib3.fields")
90 vendored("requests.packages.urllib3.filepost")
91 vendored("requests.packages.urllib3.packages")
92 vendored("requests.packages.urllib3.packages.ordered_dict")
93 vendored("requests.packages.urllib3.packages.six")
94 vendored("requests.packages.urllib3.packages.ssl_match_hostname")
95 vendored("requests.packages.urllib3.packages.ssl_match_hostname."
96 "_implementation")
97 vendored("requests.packages.urllib3.poolmanager")
98 vendored("requests.packages.urllib3.request")
99 vendored("requests.packages.urllib3.response")
100 vendored("requests.packages.urllib3.util")
101 vendored("requests.packages.urllib3.util.connection")
102 vendored("requests.packages.urllib3.util.request")
103 vendored("requests.packages.urllib3.util.response")
104 vendored("requests.packages.urllib3.util.retry")
105 vendored("requests.packages.urllib3.util.ssl_")
106 vendored("requests.packages.urllib3.util.timeout")
107 vendored("requests.packages.urllib3.util.url")
108
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pip/_vendor/__init__.py b/src/pip/_vendor/__init__.py
--- a/src/pip/_vendor/__init__.py
+++ b/src/pip/_vendor/__init__.py
@@ -70,11 +70,13 @@
vendored("six")
vendored("six.moves")
vendored("six.moves.urllib")
+ vendored("six.moves.urllib.parse")
vendored("packaging")
vendored("packaging.version")
vendored("packaging.specifiers")
vendored("pkg_resources")
vendored("progress")
+ vendored("pytoml")
vendored("retrying")
vendored("requests")
vendored("requests.packages")
|
{"golden_diff": "diff --git a/src/pip/_vendor/__init__.py b/src/pip/_vendor/__init__.py\n--- a/src/pip/_vendor/__init__.py\n+++ b/src/pip/_vendor/__init__.py\n@@ -70,11 +70,13 @@\n vendored(\"six\")\n vendored(\"six.moves\")\n vendored(\"six.moves.urllib\")\n+ vendored(\"six.moves.urllib.parse\")\n vendored(\"packaging\")\n vendored(\"packaging.version\")\n vendored(\"packaging.specifiers\")\n vendored(\"pkg_resources\")\n vendored(\"progress\")\n+ vendored(\"pytoml\")\n vendored(\"retrying\")\n vendored(\"requests\")\n vendored(\"requests.packages\")\n", "issue": "pip un-vendored support is broken\n* Pip version: 9.0.1-465-g841f5dfb\r\n* Python version: 2.7.13\r\n* Operating system: Arch Linux x86_64\r\n\r\n### What I've run:\r\n\r\n```python\r\n> ./.tox/py27-novendor/bin/pip search test\r\nTraceback (most recent call last):\r\n File \"./.tox/py27-novendor/bin/pip\", line 7, in <module>\r\n from pip import main\r\n File \".tox/py27-novendor/lib/python2.7/site-packages/pip/__init__.py\", line 46, in <module>\r\n from pip.vcs import git, mercurial, subversion, bazaar # noqa\r\n File \".tox/py27-novendor/lib/python2.7/site-packages/pip/vcs/mercurial.py\", line 8, in <module>\r\n from pip.download import path_to_url\r\n File \".tox/py27-novendor/lib/python2.7/site-packages/pip/download.py\", line 28, in <module>\r\n from pip._vendor.six.moves.urllib.parse import unquote as urllib_unquote\r\nImportError: No module named parse\r\n```\r\n\r\nand after fixing that one:\r\n\r\n```python\r\nTraceback (most recent call last):\r\n File \"./.tox/py27-novendor/bin/pip\", line 7, in <module>\r\n from pip import main\r\n File \".tox/py27-novendor/lib/python2.7/site-packages/pip/__init__.py\", line 46, in <module>\r\n from pip.vcs import git, mercurial, subversion, bazaar # noqa\r\n File \".tox/py27-novendor/lib/python2.7/site-packages/pip/vcs/subversion.py\", line 9, in <module>\r\n from pip.index import Link\r\n File \".tox/py27-novendor/lib/python2.7/site-packages/pip/index.py\", line 39, in <module>\r\n from pip.wheel import Wheel, wheel_ext\r\n File \".tox/py27-novendor/lib/python2.7/site-packages/pip/wheel.py\", line 21, in <module>\r\n from pip._vendor import pkg_resources, pytoml\r\nImportError: cannot import name pytoml\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\npip._vendor is for vendoring dependencies of pip to prevent needing pip to\ndepend on something external.\n\nFiles inside of pip._vendor should be considered immutable and should only be\nupdated to versions from upstream.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport glob\nimport os.path\nimport sys\n\n# Downstream redistributors which have debundled our dependencies should also\n# patch this value to be true. This will trigger the additional patching\n# to cause things like \"six\" to be available as pip.\nDEBUNDLED = False\n\n# By default, look in this directory for a bunch of .whl files which we will\n# add to the beginning of sys.path before attempting to import anything. This\n# is done to support downstream re-distributors like Debian and Fedora who\n# wish to create their own Wheels for our dependencies to aid in debundling.\nWHEEL_DIR = os.path.abspath(os.path.dirname(__file__))\n\n\n# Define a small helper function to alias our vendored modules to the real ones\n# if the vendored ones do not exist. 
This idea of this was taken from\n# https://github.com/kennethreitz/requests/pull/2567.\ndef vendored(modulename):\n vendored_name = \"{0}.{1}\".format(__name__, modulename)\n\n try:\n __import__(vendored_name, globals(), locals(), level=0)\n except ImportError:\n try:\n __import__(modulename, globals(), locals(), level=0)\n except ImportError:\n # We can just silently allow import failures to pass here. If we\n # got to this point it means that ``import pip._vendor.whatever``\n # failed and so did ``import whatever``. Since we're importing this\n # upfront in an attempt to alias imports, not erroring here will\n # just mean we get a regular import error whenever pip *actually*\n # tries to import one of these modules to use it, which actually\n # gives us a better error message than we would have otherwise\n # gotten.\n pass\n else:\n sys.modules[vendored_name] = sys.modules[modulename]\n base, head = vendored_name.rsplit(\".\", 1)\n setattr(sys.modules[base], head, sys.modules[modulename])\n\n\n# If we're operating in a debundled setup, then we want to go ahead and trigger\n# the aliasing of our vendored libraries as well as looking for wheels to add\n# to our sys.path. This will cause all of this code to be a no-op typically\n# however downstream redistributors can enable it in a consistent way across\n# all platforms.\nif DEBUNDLED:\n # Actually look inside of WHEEL_DIR to find .whl files and add them to the\n # front of our sys.path.\n sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, \"*.whl\")) + sys.path\n\n # Actually alias all of our vendored dependencies.\n vendored(\"cachecontrol\")\n vendored(\"colorama\")\n vendored(\"distlib\")\n vendored(\"distro\")\n vendored(\"html5lib\")\n vendored(\"lockfile\")\n vendored(\"six\")\n vendored(\"six.moves\")\n vendored(\"six.moves.urllib\")\n vendored(\"packaging\")\n vendored(\"packaging.version\")\n vendored(\"packaging.specifiers\")\n vendored(\"pkg_resources\")\n vendored(\"progress\")\n vendored(\"retrying\")\n vendored(\"requests\")\n vendored(\"requests.packages\")\n vendored(\"requests.packages.urllib3\")\n vendored(\"requests.packages.urllib3._collections\")\n vendored(\"requests.packages.urllib3.connection\")\n vendored(\"requests.packages.urllib3.connectionpool\")\n vendored(\"requests.packages.urllib3.contrib\")\n vendored(\"requests.packages.urllib3.contrib.ntlmpool\")\n vendored(\"requests.packages.urllib3.contrib.pyopenssl\")\n vendored(\"requests.packages.urllib3.exceptions\")\n vendored(\"requests.packages.urllib3.fields\")\n vendored(\"requests.packages.urllib3.filepost\")\n vendored(\"requests.packages.urllib3.packages\")\n vendored(\"requests.packages.urllib3.packages.ordered_dict\")\n vendored(\"requests.packages.urllib3.packages.six\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname.\"\n \"_implementation\")\n vendored(\"requests.packages.urllib3.poolmanager\")\n vendored(\"requests.packages.urllib3.request\")\n vendored(\"requests.packages.urllib3.response\")\n vendored(\"requests.packages.urllib3.util\")\n vendored(\"requests.packages.urllib3.util.connection\")\n vendored(\"requests.packages.urllib3.util.request\")\n vendored(\"requests.packages.urllib3.util.response\")\n vendored(\"requests.packages.urllib3.util.retry\")\n vendored(\"requests.packages.urllib3.util.ssl_\")\n vendored(\"requests.packages.urllib3.util.timeout\")\n vendored(\"requests.packages.urllib3.util.url\")\n", "path": "src/pip/_vendor/__init__.py"}], 
"after_files": [{"content": "\"\"\"\npip._vendor is for vendoring dependencies of pip to prevent needing pip to\ndepend on something external.\n\nFiles inside of pip._vendor should be considered immutable and should only be\nupdated to versions from upstream.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport glob\nimport os.path\nimport sys\n\n# Downstream redistributors which have debundled our dependencies should also\n# patch this value to be true. This will trigger the additional patching\n# to cause things like \"six\" to be available as pip.\nDEBUNDLED = False\n\n# By default, look in this directory for a bunch of .whl files which we will\n# add to the beginning of sys.path before attempting to import anything. This\n# is done to support downstream re-distributors like Debian and Fedora who\n# wish to create their own Wheels for our dependencies to aid in debundling.\nWHEEL_DIR = os.path.abspath(os.path.dirname(__file__))\n\n\n# Define a small helper function to alias our vendored modules to the real ones\n# if the vendored ones do not exist. This idea of this was taken from\n# https://github.com/kennethreitz/requests/pull/2567.\ndef vendored(modulename):\n vendored_name = \"{0}.{1}\".format(__name__, modulename)\n\n try:\n __import__(vendored_name, globals(), locals(), level=0)\n except ImportError:\n try:\n __import__(modulename, globals(), locals(), level=0)\n except ImportError:\n # We can just silently allow import failures to pass here. If we\n # got to this point it means that ``import pip._vendor.whatever``\n # failed and so did ``import whatever``. Since we're importing this\n # upfront in an attempt to alias imports, not erroring here will\n # just mean we get a regular import error whenever pip *actually*\n # tries to import one of these modules to use it, which actually\n # gives us a better error message than we would have otherwise\n # gotten.\n pass\n else:\n sys.modules[vendored_name] = sys.modules[modulename]\n base, head = vendored_name.rsplit(\".\", 1)\n setattr(sys.modules[base], head, sys.modules[modulename])\n\n\n# If we're operating in a debundled setup, then we want to go ahead and trigger\n# the aliasing of our vendored libraries as well as looking for wheels to add\n# to our sys.path. 
This will cause all of this code to be a no-op typically\n# however downstream redistributors can enable it in a consistent way across\n# all platforms.\nif DEBUNDLED:\n # Actually look inside of WHEEL_DIR to find .whl files and add them to the\n # front of our sys.path.\n sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, \"*.whl\")) + sys.path\n\n # Actually alias all of our vendored dependencies.\n vendored(\"cachecontrol\")\n vendored(\"colorama\")\n vendored(\"distlib\")\n vendored(\"distro\")\n vendored(\"html5lib\")\n vendored(\"lockfile\")\n vendored(\"six\")\n vendored(\"six.moves\")\n vendored(\"six.moves.urllib\")\n vendored(\"six.moves.urllib.parse\")\n vendored(\"packaging\")\n vendored(\"packaging.version\")\n vendored(\"packaging.specifiers\")\n vendored(\"pkg_resources\")\n vendored(\"progress\")\n vendored(\"pytoml\")\n vendored(\"retrying\")\n vendored(\"requests\")\n vendored(\"requests.packages\")\n vendored(\"requests.packages.urllib3\")\n vendored(\"requests.packages.urllib3._collections\")\n vendored(\"requests.packages.urllib3.connection\")\n vendored(\"requests.packages.urllib3.connectionpool\")\n vendored(\"requests.packages.urllib3.contrib\")\n vendored(\"requests.packages.urllib3.contrib.ntlmpool\")\n vendored(\"requests.packages.urllib3.contrib.pyopenssl\")\n vendored(\"requests.packages.urllib3.exceptions\")\n vendored(\"requests.packages.urllib3.fields\")\n vendored(\"requests.packages.urllib3.filepost\")\n vendored(\"requests.packages.urllib3.packages\")\n vendored(\"requests.packages.urllib3.packages.ordered_dict\")\n vendored(\"requests.packages.urllib3.packages.six\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname\")\n vendored(\"requests.packages.urllib3.packages.ssl_match_hostname.\"\n \"_implementation\")\n vendored(\"requests.packages.urllib3.poolmanager\")\n vendored(\"requests.packages.urllib3.request\")\n vendored(\"requests.packages.urllib3.response\")\n vendored(\"requests.packages.urllib3.util\")\n vendored(\"requests.packages.urllib3.util.connection\")\n vendored(\"requests.packages.urllib3.util.request\")\n vendored(\"requests.packages.urllib3.util.response\")\n vendored(\"requests.packages.urllib3.util.retry\")\n vendored(\"requests.packages.urllib3.util.ssl_\")\n vendored(\"requests.packages.urllib3.util.timeout\")\n vendored(\"requests.packages.urllib3.util.url\")\n", "path": "src/pip/_vendor/__init__.py"}]}
| 2,123 | 162 |
gh_patches_debug_31470
|
rasdani/github-patches
|
git_diff
|
cowrie__cowrie-1093
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
greynoise should catch timeout error
```2019-04-08T03:12:05.460833Z [twisted.internet.defer#critical] Unhandled error in Deferred:
2019-04-08T03:12:05.462257Z [twisted.internet.defer#critical]
Traceback (most recent call last):
--- <exception caught here> ---
File "/home/cowrie/cowrie/src/cowrie/output/greynoise.py", line 65, in scanip
headers=headers)
twisted.internet.error.TimeoutError: User timeout caused connection failure.
```
--- END ISSUE ---
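The traceback above is an unhandled `twisted.internet.error.TimeoutError` escaping the Deferred returned by `treq.post`. As a rough illustration of the general pattern — not the project's actual change, which appears in the patch further down this entry — the sketch below passes an explicit `timeout` to `treq` and catches both the connection-timeout error and the cancellation errors a `treq` timeout produces. The helper name and the 10-second value are assumptions made for the example.

```python
# Hedged sketch only: guarding a treq request against timeouts.
# The helper name and the 10-second timeout are assumptions for this example.
import treq

from twisted.internet import defer, error
from twisted.python import log


@defer.inlineCallbacks
def post_json_with_timeout(url, fields, headers):
    """POST and return the parsed JSON body, or None if the request times out."""
    try:
        response = yield treq.post(url=url, data=fields, headers=headers,
                                   timeout=10)  # treq cancels the Deferred on expiry
    except (error.TimeoutError,             # "User timeout caused connection failure"
            defer.CancelledError,           # raised when treq's timeout fires
            error.ConnectingCancelledError,
            error.DNSLookupError):
        log.msg("request to {0} timed out or could not connect".format(url))
        return None                         # Python 3 style; defer.returnValue on Python 2
    body = yield response.json()
    return body
```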
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/cowrie/output/greynoise.py`
Content:
```
1 """
2 Send attackers IP to GreyNoise
3 """
4
5 from __future__ import absolute_import, division
6
7 import treq
8
9 from twisted.internet import defer
10 from twisted.python import log
11
12 import cowrie.core.output
13 from cowrie.core.config import CONFIG
14
15 COWRIE_USER_AGENT = 'Cowrie Honeypot'
16 GNAPI_URL = 'http://api.greynoise.io:8888/v1/'
17
18
19 class Output(cowrie.core.output.Output):
20
21 def __init__(self):
22 self.apiKey = CONFIG.get('output_greynoise', 'api_key', fallback=None)
23 self.tags = CONFIG.get('output_greynoise', 'tags', fallback="all").split(",")
24 self.debug = CONFIG.getboolean('output_greynoise', 'debug', fallback=False)
25 cowrie.core.output.Output.__init__(self)
26
27 def start(self):
28 """
29 Start output plugin
30 """
31
32 def stop(self):
33 """
34 Stop output plugin
35 """
36 pass
37
38 def write(self, entry):
39 if entry['eventid'] == "cowrie.session.connect":
40 self.scanip(entry)
41
42 @defer.inlineCallbacks
43 def scanip(self, entry):
44 """
45 Scan IP againt Greynoise API
46 """
47 def message(query):
48 log.msg(
49 eventid='cowrie.greynoise.result',
50 format='greynoise: Scan for %(IP)s with %(tag)s have %(conf)s confidence'
51 ' along with the following %(meta)s metadata',
52 IP=entry['src_ip'],
53 tag=query['name'],
54 conf=query['confidence'],
55 meta=query['metadata']
56 )
57
58 gnUrl = '{0}query/ip'.format(GNAPI_URL).encode('utf8')
59 headers = ({'User-Agent': [COWRIE_USER_AGENT]})
60 fields = {'key': self.apiKey, 'ip': entry['src_ip']}
61
62 response = yield treq.post(
63 url=gnUrl,
64 data=fields,
65 headers=headers)
66
67 if response.code != 200:
68 rsp = yield response.text()
69 log.error("greynoise: got error {}".format(rsp))
70 return
71
72 j = yield response.json()
73 if self.debug:
74 log.msg("greynoise: debug: "+repr(j))
75 if j['status'] == "ok":
76 if "all" not in self.tags:
77 for query in j['records']:
78 if query['name'] in self.tags:
79 message(query)
80 else:
81 for query in j['records']:
82 message(query)
83 else:
84 log.msg("greynoise: no results for for IP {0}".format(entry['src_ip']))
85
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/cowrie/output/greynoise.py b/src/cowrie/output/greynoise.py
--- a/src/cowrie/output/greynoise.py
+++ b/src/cowrie/output/greynoise.py
@@ -6,7 +6,7 @@
import treq
-from twisted.internet import defer
+from twisted.internet import defer, error
from twisted.python import log
import cowrie.core.output
@@ -59,10 +59,15 @@
headers = ({'User-Agent': [COWRIE_USER_AGENT]})
fields = {'key': self.apiKey, 'ip': entry['src_ip']}
- response = yield treq.post(
- url=gnUrl,
- data=fields,
- headers=headers)
+ try:
+ response = yield treq.post(
+ url=gnUrl,
+ data=fields,
+ headers=headers,
+ timeout=10)
+ except (defer.CancelledError, error.ConnectingCancelledError, error.DNSLookupError):
+ log.msg("GreyNoise requests timeout")
+ return
if response.code != 200:
rsp = yield response.text()
@@ -72,13 +77,14 @@
j = yield response.json()
if self.debug:
log.msg("greynoise: debug: "+repr(j))
- if j['status'] == "ok":
- if "all" not in self.tags:
- for query in j['records']:
- if query['name'] in self.tags:
- message(query)
- else:
- for query in j['records']:
+
+ if j['status'] == "ok":
+ if "all" not in self.tags:
+ for query in j['records']:
+ if query['name'] in self.tags:
message(query)
else:
- log.msg("greynoise: no results for for IP {0}".format(entry['src_ip']))
+ for query in j['records']:
+ message(query)
+ else:
+ log.msg("greynoise: no results for for IP {0}".format(entry['src_ip']))
|
{"golden_diff": "diff --git a/src/cowrie/output/greynoise.py b/src/cowrie/output/greynoise.py\n--- a/src/cowrie/output/greynoise.py\n+++ b/src/cowrie/output/greynoise.py\n@@ -6,7 +6,7 @@\n \n import treq\n \n-from twisted.internet import defer\n+from twisted.internet import defer, error\n from twisted.python import log\n \n import cowrie.core.output\n@@ -59,10 +59,15 @@\n headers = ({'User-Agent': [COWRIE_USER_AGENT]})\n fields = {'key': self.apiKey, 'ip': entry['src_ip']}\n \n- response = yield treq.post(\n- url=gnUrl,\n- data=fields,\n- headers=headers)\n+ try:\n+ response = yield treq.post(\n+ url=gnUrl,\n+ data=fields,\n+ headers=headers,\n+ timeout=10)\n+ except (defer.CancelledError, error.ConnectingCancelledError, error.DNSLookupError):\n+ log.msg(\"GreyNoise requests timeout\")\n+ return\n \n if response.code != 200:\n rsp = yield response.text()\n@@ -72,13 +77,14 @@\n j = yield response.json()\n if self.debug:\n log.msg(\"greynoise: debug: \"+repr(j))\n- if j['status'] == \"ok\":\n- if \"all\" not in self.tags:\n- for query in j['records']:\n- if query['name'] in self.tags:\n- message(query)\n- else:\n- for query in j['records']:\n+\n+ if j['status'] == \"ok\":\n+ if \"all\" not in self.tags:\n+ for query in j['records']:\n+ if query['name'] in self.tags:\n message(query)\n else:\n- log.msg(\"greynoise: no results for for IP {0}\".format(entry['src_ip']))\n+ for query in j['records']:\n+ message(query)\n+ else:\n+ log.msg(\"greynoise: no results for for IP {0}\".format(entry['src_ip']))\n", "issue": "greynoise should catch timeout error\n```2019-04-08T03:12:05.460833Z [twisted.internet.defer#critical] Unhandled error in Deferred:\r\n2019-04-08T03:12:05.462257Z [twisted.internet.defer#critical]\r\n Traceback (most recent call last):\r\n --- <exception caught here> ---\r\n File \"/home/cowrie/cowrie/src/cowrie/output/greynoise.py\", line 65, in scanip\r\n headers=headers)\r\n twisted.internet.error.TimeoutError: User timeout caused connection failure.\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nSend attackers IP to GreyNoise\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport treq\n\nfrom twisted.internet import defer\nfrom twisted.python import log\n\nimport cowrie.core.output\nfrom cowrie.core.config import CONFIG\n\nCOWRIE_USER_AGENT = 'Cowrie Honeypot'\nGNAPI_URL = 'http://api.greynoise.io:8888/v1/'\n\n\nclass Output(cowrie.core.output.Output):\n\n def __init__(self):\n self.apiKey = CONFIG.get('output_greynoise', 'api_key', fallback=None)\n self.tags = CONFIG.get('output_greynoise', 'tags', fallback=\"all\").split(\",\")\n self.debug = CONFIG.getboolean('output_greynoise', 'debug', fallback=False)\n cowrie.core.output.Output.__init__(self)\n\n def start(self):\n \"\"\"\n Start output plugin\n \"\"\"\n\n def stop(self):\n \"\"\"\n Stop output plugin\n \"\"\"\n pass\n\n def write(self, entry):\n if entry['eventid'] == \"cowrie.session.connect\":\n self.scanip(entry)\n\n @defer.inlineCallbacks\n def scanip(self, entry):\n \"\"\"\n Scan IP againt Greynoise API\n \"\"\"\n def message(query):\n log.msg(\n eventid='cowrie.greynoise.result',\n format='greynoise: Scan for %(IP)s with %(tag)s have %(conf)s confidence'\n ' along with the following %(meta)s metadata',\n IP=entry['src_ip'],\n tag=query['name'],\n conf=query['confidence'],\n meta=query['metadata']\n )\n\n gnUrl = '{0}query/ip'.format(GNAPI_URL).encode('utf8')\n headers = ({'User-Agent': [COWRIE_USER_AGENT]})\n fields = {'key': self.apiKey, 'ip': entry['src_ip']}\n\n response = yield treq.post(\n 
url=gnUrl,\n data=fields,\n headers=headers)\n\n if response.code != 200:\n rsp = yield response.text()\n log.error(\"greynoise: got error {}\".format(rsp))\n return\n\n j = yield response.json()\n if self.debug:\n log.msg(\"greynoise: debug: \"+repr(j))\n if j['status'] == \"ok\":\n if \"all\" not in self.tags:\n for query in j['records']:\n if query['name'] in self.tags:\n message(query)\n else:\n for query in j['records']:\n message(query)\n else:\n log.msg(\"greynoise: no results for for IP {0}\".format(entry['src_ip']))\n", "path": "src/cowrie/output/greynoise.py"}], "after_files": [{"content": "\"\"\"\nSend attackers IP to GreyNoise\n\"\"\"\n\nfrom __future__ import absolute_import, division\n\nimport treq\n\nfrom twisted.internet import defer, error\nfrom twisted.python import log\n\nimport cowrie.core.output\nfrom cowrie.core.config import CONFIG\n\nCOWRIE_USER_AGENT = 'Cowrie Honeypot'\nGNAPI_URL = 'http://api.greynoise.io:8888/v1/'\n\n\nclass Output(cowrie.core.output.Output):\n\n def __init__(self):\n self.apiKey = CONFIG.get('output_greynoise', 'api_key', fallback=None)\n self.tags = CONFIG.get('output_greynoise', 'tags', fallback=\"all\").split(\",\")\n self.debug = CONFIG.getboolean('output_greynoise', 'debug', fallback=False)\n cowrie.core.output.Output.__init__(self)\n\n def start(self):\n \"\"\"\n Start output plugin\n \"\"\"\n\n def stop(self):\n \"\"\"\n Stop output plugin\n \"\"\"\n pass\n\n def write(self, entry):\n if entry['eventid'] == \"cowrie.session.connect\":\n self.scanip(entry)\n\n @defer.inlineCallbacks\n def scanip(self, entry):\n \"\"\"\n Scan IP againt Greynoise API\n \"\"\"\n def message(query):\n log.msg(\n eventid='cowrie.greynoise.result',\n format='greynoise: Scan for %(IP)s with %(tag)s have %(conf)s confidence'\n ' along with the following %(meta)s metadata',\n IP=entry['src_ip'],\n tag=query['name'],\n conf=query['confidence'],\n meta=query['metadata']\n )\n\n gnUrl = '{0}query/ip'.format(GNAPI_URL).encode('utf8')\n headers = ({'User-Agent': [COWRIE_USER_AGENT]})\n fields = {'key': self.apiKey, 'ip': entry['src_ip']}\n\n try:\n response = yield treq.post(\n url=gnUrl,\n data=fields,\n headers=headers,\n timeout=10)\n except (defer.CancelledError, error.ConnectingCancelledError, error.DNSLookupError):\n log.msg(\"GreyNoise requests timeout\")\n return\n\n if response.code != 200:\n rsp = yield response.text()\n log.error(\"greynoise: got error {}\".format(rsp))\n return\n\n j = yield response.json()\n if self.debug:\n log.msg(\"greynoise: debug: \"+repr(j))\n\n if j['status'] == \"ok\":\n if \"all\" not in self.tags:\n for query in j['records']:\n if query['name'] in self.tags:\n message(query)\n else:\n for query in j['records']:\n message(query)\n else:\n log.msg(\"greynoise: no results for for IP {0}\".format(entry['src_ip']))\n", "path": "src/cowrie/output/greynoise.py"}]}
| 1,158 | 474 |
gh_patches_debug_3627
|
rasdani/github-patches
|
git_diff
|
ethereum__web3.py-912
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Upgrade to or add support for websockets v5
### What was wrong?
We are currently using the `websockets` library's v4 line. The v5 line is out.
### How can it be fixed?
Look into adding support for both v4 and v5.
If this is too cumbersome, we can simply upgrade to requiring `>=v5`
--- END ISSUE ---
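For reference, the two options sketched in the issue map onto different PEP 440 specifiers in `install_requires`. The fragment below only illustrates those bounds; the project metadata is a placeholder, and the range the maintainers actually picked is in this entry's patch.

```python
# Illustrative version bounds only; not the pin chosen in the actual patch.
from setuptools import setup

setup(
    name="example-project",   # placeholder metadata for the sketch
    version="0.0.0",
    install_requires=[
        # Option A: accept both the v4 and v5 lines of websockets
        "websockets>=4.0.1,<6.0.0",
        # Option B (alternative): require the v5 line and drop v4
        # "websockets>=5.0.1,<6.0.0",
    ],
)
```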
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 #!/usr/bin/env python
2 # -*- coding: utf-8 -*-
3 from setuptools import (
4 find_packages,
5 setup,
6 )
7
8
9 setup(
10 name='web3',
11 # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.
12 version='4.3.0',
13 description="""Web3.py""",
14 long_description_markdown_filename='README.md',
15 author='Piper Merriam',
16 author_email='[email protected]',
17 url='https://github.com/ethereum/web3.py',
18 include_package_data=True,
19 install_requires=[
20 "toolz>=0.9.0,<1.0.0;implementation_name=='pypy'",
21 "cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'",
22 "eth-abi>=1.1.1,<2",
23 "eth-account>=0.2.1,<0.3.0",
24 "eth-utils>=1.0.1,<2.0.0",
25 "hexbytes>=0.1.0,<1.0.0",
26 "lru-dict>=1.1.6,<2.0.0",
27 "eth-hash[pycryptodome]",
28 "requests>=2.16.0,<3.0.0",
29 "websockets>=4.0.1,<5.0.0",
30 "pypiwin32>=223;platform_system=='Windows'",
31 ],
32 setup_requires=['setuptools-markdown'],
33 python_requires='>=3.5, <4',
34 extras_require={
35 'tester': [
36 "eth-tester[py-evm]==0.1.0-beta.26",
37 "py-geth>=2.0.1,<3.0.0",
38 ],
39 'testrpc': ["eth-testrpc>=1.3.3,<2.0.0"],
40 'linter': [
41 "flake8==3.4.1",
42 "isort>=4.2.15,<5",
43 ],
44 },
45 py_modules=['web3', 'ens'],
46 license="MIT",
47 zip_safe=False,
48 keywords='ethereum',
49 packages=find_packages(exclude=["tests", "tests.*"]),
50 classifiers=[
51 'Development Status :: 5 - Production/Stable',
52 'Intended Audience :: Developers',
53 'License :: OSI Approved :: MIT License',
54 'Natural Language :: English',
55 'Programming Language :: Python :: 3',
56 'Programming Language :: Python :: 3.5',
57 'Programming Language :: Python :: 3.6',
58 ],
59 )
60
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -26,7 +26,7 @@
"lru-dict>=1.1.6,<2.0.0",
"eth-hash[pycryptodome]",
"requests>=2.16.0,<3.0.0",
- "websockets>=4.0.1,<5.0.0",
+ "websockets>=5.0.1,<6.0.0",
"pypiwin32>=223;platform_system=='Windows'",
],
setup_requires=['setuptools-markdown'],
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -26,7 +26,7 @@\n \"lru-dict>=1.1.6,<2.0.0\",\n \"eth-hash[pycryptodome]\",\n \"requests>=2.16.0,<3.0.0\",\n- \"websockets>=4.0.1,<5.0.0\",\n+ \"websockets>=5.0.1,<6.0.0\",\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n", "issue": "Upgrade to or add support for websockets v5\n### What was wrong?\r\n\r\nWe are currently using the `websockets` library's v4 line. The v5 line is out.\r\n\r\n### How can it be fixed?\r\n\r\nLook into adding support for both v4 and v5.\r\n\r\nIf this is too cumbersome, we can simply upgrade to requiring `>=v5`\r\n\n", "before_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. Use the 'bumpversion' utility.\n version='4.3.0',\n description=\"\"\"Web3.py\"\"\",\n long_description_markdown_filename='README.md',\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"toolz>=0.9.0,<1.0.0;implementation_name=='pypy'\",\n \"cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'\",\n \"eth-abi>=1.1.1,<2\",\n \"eth-account>=0.2.1,<0.3.0\",\n \"eth-utils>=1.0.1,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"eth-hash[pycryptodome]\",\n \"requests>=2.16.0,<3.0.0\",\n \"websockets>=4.0.1,<5.0.0\",\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n python_requires='>=3.5, <4',\n extras_require={\n 'tester': [\n \"eth-tester[py-evm]==0.1.0-beta.26\",\n \"py-geth>=2.0.1,<3.0.0\",\n ],\n 'testrpc': [\"eth-testrpc>=1.3.3,<2.0.0\"],\n 'linter': [\n \"flake8==3.4.1\",\n \"isort>=4.2.15,<5\",\n ],\n },\n py_modules=['web3', 'ens'],\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom setuptools import (\n find_packages,\n setup,\n)\n\n\nsetup(\n name='web3',\n # *IMPORTANT*: Don't manually change the version here. 
Use the 'bumpversion' utility.\n version='4.3.0',\n description=\"\"\"Web3.py\"\"\",\n long_description_markdown_filename='README.md',\n author='Piper Merriam',\n author_email='[email protected]',\n url='https://github.com/ethereum/web3.py',\n include_package_data=True,\n install_requires=[\n \"toolz>=0.9.0,<1.0.0;implementation_name=='pypy'\",\n \"cytoolz>=0.9.0,<1.0.0;implementation_name=='cpython'\",\n \"eth-abi>=1.1.1,<2\",\n \"eth-account>=0.2.1,<0.3.0\",\n \"eth-utils>=1.0.1,<2.0.0\",\n \"hexbytes>=0.1.0,<1.0.0\",\n \"lru-dict>=1.1.6,<2.0.0\",\n \"eth-hash[pycryptodome]\",\n \"requests>=2.16.0,<3.0.0\",\n \"websockets>=5.0.1,<6.0.0\",\n \"pypiwin32>=223;platform_system=='Windows'\",\n ],\n setup_requires=['setuptools-markdown'],\n python_requires='>=3.5, <4',\n extras_require={\n 'tester': [\n \"eth-tester[py-evm]==0.1.0-beta.26\",\n \"py-geth>=2.0.1,<3.0.0\",\n ],\n 'testrpc': [\"eth-testrpc>=1.3.3,<2.0.0\"],\n 'linter': [\n \"flake8==3.4.1\",\n \"isort>=4.2.15,<5\",\n ],\n },\n py_modules=['web3', 'ens'],\n license=\"MIT\",\n zip_safe=False,\n keywords='ethereum',\n packages=find_packages(exclude=[\"tests\", \"tests.*\"]),\n classifiers=[\n 'Development Status :: 5 - Production/Stable',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: MIT License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n)\n", "path": "setup.py"}]}
| 1,011 | 142 |
gh_patches_debug_16308
|
rasdani/github-patches
|
git_diff
|
projectmesa__mesa-755
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make opening the browser when launching the server optional
**What's the problem this feature will solve?**
When we call `server.launch()` on a `ModularServer` instance, the browser always opens another tab. This is not always desired behavior. 
**Describe the solution you'd like**
We should be able to make this optional. To maintain backwards compatibility we can keep the current behavior as the default but override it with something like `server.launch(open_browser=False)`.
**Additional context**
I will make a PR with this simple change.
--- END ISSUE ---
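A minimal, hypothetical sketch of the backwards-compatible shape described above: the new flag defaults to `True`, so existing callers are untouched, and only an explicit `open_browser=False` skips the `webbrowser.open` call. The standalone function is illustrative; the real change to `ModularServer.launch` is in this entry's patch.

```python
# Hedged sketch: a stand-in for ModularServer.launch, not the actual method.
import webbrowser


def launch(url="http://127.0.0.1:8521", open_browser=True):
    """Start the interface; open a browser tab only if the caller wants one."""
    print("Interface starting at {url}".format(url=url))
    if open_browser:              # default preserves today's behaviour
        webbrowser.open(url)
    # ...the real server would now listen on its port and start the IO loop...


launch(open_browser=False)        # opt out without touching existing callers
```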
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mesa/visualization/ModularVisualization.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 """
3 ModularServer
4 =============
5
6 A visualization server which renders a model via one or more elements.
7
8 The concept for the modular visualization server as follows:
9 A visualization is composed of VisualizationElements, each of which defines how
10 to generate some visualization from a model instance and render it on the
11 client. VisualizationElements may be anything from a simple text display to
12 a multilayered HTML5 canvas.
13
14 The actual server is launched with one or more VisualizationElements;
15 it runs the model object through each of them, generating data to be sent to
16 the client. The client page is also generated based on the JavaScript code
17 provided by each element.
18
19 This file consists of the following classes:
20
21 VisualizationElement: Parent class for all other visualization elements, with
22 the minimal necessary options.
23 PageHandler: The handler for the visualization page, generated from a template
24 and built from the various visualization elements.
25 SocketHandler: Handles the websocket connection between the client page and
26 the server.
27 ModularServer: The overall visualization application class which stores and
28 controls the model and visualization instance.
29
30
31 ModularServer should *not* need to be subclassed on a model-by-model basis; it
32 should be primarily a pass-through for VisualizationElement subclasses, which
33 define the actual visualization specifics.
34
35 For example, suppose we have created two visualization elements for our model,
36 called canvasvis and graphvis; we would launch a server with:
37
38 server = ModularServer(MyModel, [canvasvis, graphvis], name="My Model")
39 server.launch()
40
41 The client keeps track of what step it is showing. Clicking the Step button in
42 the browser sends a message requesting the viz_state corresponding to the next
43 step position, which is then sent back to the client via the websocket.
44
45 The websocket protocol is as follows:
46 Each message is a JSON object, with a "type" property which defines the rest of
47 the structure.
48
49 Server -> Client:
50 Send over the model state to visualize.
51 Model state is a list, with each element corresponding to a div; each div
52 is expected to have a render function associated with it, which knows how
53 to render that particular data. The example below includes two elements:
54 the first is data for a CanvasGrid, the second for a raw text display.
55
56 {
57 "type": "viz_state",
58 "data": [{0:[ {"Shape": "circle", "x": 0, "y": 0, "r": 0.5,
59 "Color": "#AAAAAA", "Filled": "true", "Layer": 0,
60 "text": 'A', "text_color": "white" }]},
61 "Shape Count: 1"]
62 }
63
64 Informs the client that the model is over.
65 {"type": "end"}
66
67 Informs the client of the current model's parameters
68 {
69 "type": "model_params",
70 "params": 'dict' of model params, (i.e. {arg_1: val_1, ...})
71 }
72
73 Client -> Server:
74 Reset the model.
75 TODO: Allow this to come with parameters
76 {
77 "type": "reset"
78 }
79
80 Get a given state.
81 {
82 "type": "get_step",
83 "step:" index of the step to get.
84 }
85
86 Submit model parameter updates
87 {
88 "type": "submit_params",
89 "param": name of model parameter
90 "value": new value for 'param'
91 }
92
93 Get the model's parameters
94 {
95 "type": "get_params"
96 }
97
98 """
99 import os
100 import tornado.autoreload
101 import tornado.ioloop
102 import tornado.web
103 import tornado.websocket
104 import tornado.escape
105 import tornado.gen
106 import webbrowser
107
108 from mesa.visualization.UserParam import UserSettableParameter
109
110 # Suppress several pylint warnings for this file.
111 # Attributes being defined outside of init is a Tornado feature.
112 # pylint: disable=attribute-defined-outside-init
113
114
115 class VisualizationElement:
116 """
117 Defines an element of the visualization.
118
119 Attributes:
120 package_includes: A list of external JavaScript files to include that
121 are part of the Mesa packages.
122 local_includes: A list of JavaScript files that are local to the
123 directory that the server is being run in.
124 js_code: A JavaScript code string to instantiate the element.
125
126 Methods:
127 render: Takes a model object, and produces JSON data which can be sent
128 to the client.
129
130 """
131
132 package_includes = []
133 local_includes = []
134 js_code = ''
135 render_args = {}
136
137 def __init__(self):
138 pass
139
140 def render(self, model):
141 """ Build visualization data from a model object.
142
143 Args:
144 model: A model object
145
146 Returns:
147 A JSON-ready object.
148
149 """
150 return "<b>VisualizationElement goes here</b>."
151
152 # =============================================================================
153 # Actual Tornado code starts here:
154
155
156 class PageHandler(tornado.web.RequestHandler):
157 """ Handler for the HTML template which holds the visualization. """
158
159 def get(self):
160 elements = self.application.visualization_elements
161 for i, element in enumerate(elements):
162 element.index = i
163 self.render("modular_template.html", port=self.application.port,
164 model_name=self.application.model_name,
165 description=self.application.description,
166 package_includes=self.application.package_includes,
167 local_includes=self.application.local_includes,
168 scripts=self.application.js_code)
169
170
171 class SocketHandler(tornado.websocket.WebSocketHandler):
172 """ Handler for websocket. """
173 def open(self):
174 if self.application.verbose:
175 print("Socket opened!")
176 self.write_message({
177 "type": "model_params",
178 "params": self.application.user_params
179 })
180
181 def check_origin(self, origin):
182 return True
183
184 @property
185 def viz_state_message(self):
186 return {
187 "type": "viz_state",
188 "data": self.application.render_model()
189 }
190
191 def on_message(self, message):
192 """ Receiving a message from the websocket, parse, and act accordingly.
193
194 """
195 if self.application.verbose:
196 print(message)
197 msg = tornado.escape.json_decode(message)
198
199 if msg["type"] == "get_step":
200 if not self.application.model.running:
201 self.write_message({"type": "end"})
202 else:
203 self.application.model.step()
204 self.write_message(self.viz_state_message)
205
206 elif msg["type"] == "reset":
207 self.application.reset_model()
208 self.write_message(self.viz_state_message)
209
210 elif msg["type"] == "submit_params":
211 param = msg["param"]
212 value = msg["value"]
213
214 # Is the param editable?
215 if param in self.application.user_params:
216 if isinstance(self.application.model_kwargs[param], UserSettableParameter):
217 self.application.model_kwargs[param].value = value
218 else:
219 self.application.model_kwargs[param] = value
220
221 else:
222 if self.application.verbose:
223 print("Unexpected message!")
224
225
226 class ModularServer(tornado.web.Application):
227 """ Main visualization application. """
228 verbose = True
229
230 port = 8521 # Default port to listen on
231 max_steps = 100000
232
233 # Handlers and other globals:
234 page_handler = (r'/', PageHandler)
235 socket_handler = (r'/ws', SocketHandler)
236 static_handler = (r'/static/(.*)', tornado.web.StaticFileHandler,
237 {"path": os.path.dirname(__file__) + "/templates"})
238 local_handler = (r'/local/(.*)', tornado.web.StaticFileHandler,
239 {"path": ''})
240
241 handlers = [page_handler, socket_handler, static_handler, local_handler]
242
243 settings = {"debug": True,
244 "autoreload": False,
245 "template_path": os.path.dirname(__file__) + "/templates"}
246
247 EXCLUDE_LIST = ('width', 'height',)
248
249 def __init__(self, model_cls, visualization_elements, name="Mesa Model",
250 model_params={}):
251 """ Create a new visualization server with the given elements. """
252 # Prep visualization elements:
253 self.visualization_elements = visualization_elements
254 self.package_includes = set()
255 self.local_includes = set()
256 self.js_code = []
257 for element in self.visualization_elements:
258 for include_file in element.package_includes:
259 self.package_includes.add(include_file)
260 for include_file in element.local_includes:
261 self.local_includes.add(include_file)
262 self.js_code.append(element.js_code)
263
264 # Initializing the model
265 self.model_name = name
266 self.model_cls = model_cls
267 self.description = 'No description available'
268 if hasattr(model_cls, 'description'):
269 self.description = model_cls.description
270 elif model_cls.__doc__ is not None:
271 self.description = model_cls.__doc__
272
273 self.model_kwargs = model_params
274 self.reset_model()
275
276 # Initializing the application itself:
277 super().__init__(self.handlers, **self.settings)
278
279 @property
280 def user_params(self):
281 result = {}
282 for param, val in self.model_kwargs.items():
283 if isinstance(val, UserSettableParameter):
284 result[param] = val.json
285
286 return result
287
288 def reset_model(self):
289 """ Reinstantiate the model object, using the current parameters. """
290
291 model_params = {}
292 for key, val in self.model_kwargs.items():
293 if isinstance(val, UserSettableParameter):
294 if val.param_type == 'static_text': # static_text is never used for setting params
295 continue
296 model_params[key] = val.value
297 else:
298 model_params[key] = val
299
300 self.model = self.model_cls(**model_params)
301
302 def render_model(self):
303 """ Turn the current state of the model into a dictionary of
304 visualizations
305
306 """
307 visualization_state = []
308 for element in self.visualization_elements:
309 element_state = element.render(self.model)
310 visualization_state.append(element_state)
311 return visualization_state
312
313 def launch(self, port=None):
314 """ Run the app. """
315 if port is not None:
316 self.port = port
317 url = 'http://127.0.0.1:{PORT}'.format(PORT=self.port)
318 print('Interface starting at {url}'.format(url=url))
319 self.listen(self.port)
320 webbrowser.open(url)
321 tornado.autoreload.start()
322 tornado.ioloop.IOLoop.current().start()
323
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mesa/visualization/ModularVisualization.py b/mesa/visualization/ModularVisualization.py
--- a/mesa/visualization/ModularVisualization.py
+++ b/mesa/visualization/ModularVisualization.py
@@ -310,13 +310,14 @@
visualization_state.append(element_state)
return visualization_state
- def launch(self, port=None):
+ def launch(self, port=None, open_browser=True):
""" Run the app. """
if port is not None:
self.port = port
url = 'http://127.0.0.1:{PORT}'.format(PORT=self.port)
print('Interface starting at {url}'.format(url=url))
self.listen(self.port)
- webbrowser.open(url)
+ if open_browser:
+ webbrowser.open(url)
tornado.autoreload.start()
tornado.ioloop.IOLoop.current().start()
|
{"golden_diff": "diff --git a/mesa/visualization/ModularVisualization.py b/mesa/visualization/ModularVisualization.py\n--- a/mesa/visualization/ModularVisualization.py\n+++ b/mesa/visualization/ModularVisualization.py\n@@ -310,13 +310,14 @@\n visualization_state.append(element_state)\n return visualization_state\n \n- def launch(self, port=None):\n+ def launch(self, port=None, open_browser=True):\n \"\"\" Run the app. \"\"\"\n if port is not None:\n self.port = port\n url = 'http://127.0.0.1:{PORT}'.format(PORT=self.port)\n print('Interface starting at {url}'.format(url=url))\n self.listen(self.port)\n- webbrowser.open(url)\n+ if open_browser:\n+ webbrowser.open(url)\n tornado.autoreload.start()\n tornado.ioloop.IOLoop.current().start()\n", "issue": "Make opening the browser when launching the server optional\n**What's the problem this feature will solve?**\r\nWhen we call `server.launch()` on a `ModularServer` instance the browser always opens another tab. This is not always desired behavior. \r\n\r\n**Describe the solution you'd like**\r\nWe should be able to make this optional. To maintain backwards compatibility we can keep the current behavior as default but over ride it with something like `server.launch(open_browser=False)`\r\n\r\n**Additional context**\r\nI will make a PR with this simple change.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nModularServer\n=============\n\nA visualization server which renders a model via one or more elements.\n\nThe concept for the modular visualization server as follows:\nA visualization is composed of VisualizationElements, each of which defines how\nto generate some visualization from a model instance and render it on the\nclient. VisualizationElements may be anything from a simple text display to\na multilayered HTML5 canvas.\n\nThe actual server is launched with one or more VisualizationElements;\nit runs the model object through each of them, generating data to be sent to\nthe client. The client page is also generated based on the JavaScript code\nprovided by each element.\n\nThis file consists of the following classes:\n\nVisualizationElement: Parent class for all other visualization elements, with\n the minimal necessary options.\nPageHandler: The handler for the visualization page, generated from a template\n and built from the various visualization elements.\nSocketHandler: Handles the websocket connection between the client page and\n the server.\nModularServer: The overall visualization application class which stores and\n controls the model and visualization instance.\n\n\nModularServer should *not* need to be subclassed on a model-by-model basis; it\nshould be primarily a pass-through for VisualizationElement subclasses, which\ndefine the actual visualization specifics.\n\nFor example, suppose we have created two visualization elements for our model,\ncalled canvasvis and graphvis; we would launch a server with:\n\n server = ModularServer(MyModel, [canvasvis, graphvis], name=\"My Model\")\n server.launch()\n\nThe client keeps track of what step it is showing. 
Clicking the Step button in\nthe browser sends a message requesting the viz_state corresponding to the next\nstep position, which is then sent back to the client via the websocket.\n\nThe websocket protocol is as follows:\nEach message is a JSON object, with a \"type\" property which defines the rest of\nthe structure.\n\nServer -> Client:\n Send over the model state to visualize.\n Model state is a list, with each element corresponding to a div; each div\n is expected to have a render function associated with it, which knows how\n to render that particular data. The example below includes two elements:\n the first is data for a CanvasGrid, the second for a raw text display.\n\n {\n \"type\": \"viz_state\",\n \"data\": [{0:[ {\"Shape\": \"circle\", \"x\": 0, \"y\": 0, \"r\": 0.5,\n \"Color\": \"#AAAAAA\", \"Filled\": \"true\", \"Layer\": 0,\n \"text\": 'A', \"text_color\": \"white\" }]},\n \"Shape Count: 1\"]\n }\n\n Informs the client that the model is over.\n {\"type\": \"end\"}\n\n Informs the client of the current model's parameters\n {\n \"type\": \"model_params\",\n \"params\": 'dict' of model params, (i.e. {arg_1: val_1, ...})\n }\n\nClient -> Server:\n Reset the model.\n TODO: Allow this to come with parameters\n {\n \"type\": \"reset\"\n }\n\n Get a given state.\n {\n \"type\": \"get_step\",\n \"step:\" index of the step to get.\n }\n\n Submit model parameter updates\n {\n \"type\": \"submit_params\",\n \"param\": name of model parameter\n \"value\": new value for 'param'\n }\n\n Get the model's parameters\n {\n \"type\": \"get_params\"\n }\n\n\"\"\"\nimport os\nimport tornado.autoreload\nimport tornado.ioloop\nimport tornado.web\nimport tornado.websocket\nimport tornado.escape\nimport tornado.gen\nimport webbrowser\n\nfrom mesa.visualization.UserParam import UserSettableParameter\n\n# Suppress several pylint warnings for this file.\n# Attributes being defined outside of init is a Tornado feature.\n# pylint: disable=attribute-defined-outside-init\n\n\nclass VisualizationElement:\n \"\"\"\n Defines an element of the visualization.\n\n Attributes:\n package_includes: A list of external JavaScript files to include that\n are part of the Mesa packages.\n local_includes: A list of JavaScript files that are local to the\n directory that the server is being run in.\n js_code: A JavaScript code string to instantiate the element.\n\n Methods:\n render: Takes a model object, and produces JSON data which can be sent\n to the client.\n\n \"\"\"\n\n package_includes = []\n local_includes = []\n js_code = ''\n render_args = {}\n\n def __init__(self):\n pass\n\n def render(self, model):\n \"\"\" Build visualization data from a model object.\n\n Args:\n model: A model object\n\n Returns:\n A JSON-ready object.\n\n \"\"\"\n return \"<b>VisualizationElement goes here</b>.\"\n\n# =============================================================================\n# Actual Tornado code starts here:\n\n\nclass PageHandler(tornado.web.RequestHandler):\n \"\"\" Handler for the HTML template which holds the visualization. 
\"\"\"\n\n def get(self):\n elements = self.application.visualization_elements\n for i, element in enumerate(elements):\n element.index = i\n self.render(\"modular_template.html\", port=self.application.port,\n model_name=self.application.model_name,\n description=self.application.description,\n package_includes=self.application.package_includes,\n local_includes=self.application.local_includes,\n scripts=self.application.js_code)\n\n\nclass SocketHandler(tornado.websocket.WebSocketHandler):\n \"\"\" Handler for websocket. \"\"\"\n def open(self):\n if self.application.verbose:\n print(\"Socket opened!\")\n self.write_message({\n \"type\": \"model_params\",\n \"params\": self.application.user_params\n })\n\n def check_origin(self, origin):\n return True\n\n @property\n def viz_state_message(self):\n return {\n \"type\": \"viz_state\",\n \"data\": self.application.render_model()\n }\n\n def on_message(self, message):\n \"\"\" Receiving a message from the websocket, parse, and act accordingly.\n\n \"\"\"\n if self.application.verbose:\n print(message)\n msg = tornado.escape.json_decode(message)\n\n if msg[\"type\"] == \"get_step\":\n if not self.application.model.running:\n self.write_message({\"type\": \"end\"})\n else:\n self.application.model.step()\n self.write_message(self.viz_state_message)\n\n elif msg[\"type\"] == \"reset\":\n self.application.reset_model()\n self.write_message(self.viz_state_message)\n\n elif msg[\"type\"] == \"submit_params\":\n param = msg[\"param\"]\n value = msg[\"value\"]\n\n # Is the param editable?\n if param in self.application.user_params:\n if isinstance(self.application.model_kwargs[param], UserSettableParameter):\n self.application.model_kwargs[param].value = value\n else:\n self.application.model_kwargs[param] = value\n\n else:\n if self.application.verbose:\n print(\"Unexpected message!\")\n\n\nclass ModularServer(tornado.web.Application):\n \"\"\" Main visualization application. \"\"\"\n verbose = True\n\n port = 8521 # Default port to listen on\n max_steps = 100000\n\n # Handlers and other globals:\n page_handler = (r'/', PageHandler)\n socket_handler = (r'/ws', SocketHandler)\n static_handler = (r'/static/(.*)', tornado.web.StaticFileHandler,\n {\"path\": os.path.dirname(__file__) + \"/templates\"})\n local_handler = (r'/local/(.*)', tornado.web.StaticFileHandler,\n {\"path\": ''})\n\n handlers = [page_handler, socket_handler, static_handler, local_handler]\n\n settings = {\"debug\": True,\n \"autoreload\": False,\n \"template_path\": os.path.dirname(__file__) + \"/templates\"}\n\n EXCLUDE_LIST = ('width', 'height',)\n\n def __init__(self, model_cls, visualization_elements, name=\"Mesa Model\",\n model_params={}):\n \"\"\" Create a new visualization server with the given elements. 
\"\"\"\n # Prep visualization elements:\n self.visualization_elements = visualization_elements\n self.package_includes = set()\n self.local_includes = set()\n self.js_code = []\n for element in self.visualization_elements:\n for include_file in element.package_includes:\n self.package_includes.add(include_file)\n for include_file in element.local_includes:\n self.local_includes.add(include_file)\n self.js_code.append(element.js_code)\n\n # Initializing the model\n self.model_name = name\n self.model_cls = model_cls\n self.description = 'No description available'\n if hasattr(model_cls, 'description'):\n self.description = model_cls.description\n elif model_cls.__doc__ is not None:\n self.description = model_cls.__doc__\n\n self.model_kwargs = model_params\n self.reset_model()\n\n # Initializing the application itself:\n super().__init__(self.handlers, **self.settings)\n\n @property\n def user_params(self):\n result = {}\n for param, val in self.model_kwargs.items():\n if isinstance(val, UserSettableParameter):\n result[param] = val.json\n\n return result\n\n def reset_model(self):\n \"\"\" Reinstantiate the model object, using the current parameters. \"\"\"\n\n model_params = {}\n for key, val in self.model_kwargs.items():\n if isinstance(val, UserSettableParameter):\n if val.param_type == 'static_text': # static_text is never used for setting params\n continue\n model_params[key] = val.value\n else:\n model_params[key] = val\n\n self.model = self.model_cls(**model_params)\n\n def render_model(self):\n \"\"\" Turn the current state of the model into a dictionary of\n visualizations\n\n \"\"\"\n visualization_state = []\n for element in self.visualization_elements:\n element_state = element.render(self.model)\n visualization_state.append(element_state)\n return visualization_state\n\n def launch(self, port=None):\n \"\"\" Run the app. \"\"\"\n if port is not None:\n self.port = port\n url = 'http://127.0.0.1:{PORT}'.format(PORT=self.port)\n print('Interface starting at {url}'.format(url=url))\n self.listen(self.port)\n webbrowser.open(url)\n tornado.autoreload.start()\n tornado.ioloop.IOLoop.current().start()\n", "path": "mesa/visualization/ModularVisualization.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\"\"\"\nModularServer\n=============\n\nA visualization server which renders a model via one or more elements.\n\nThe concept for the modular visualization server as follows:\nA visualization is composed of VisualizationElements, each of which defines how\nto generate some visualization from a model instance and render it on the\nclient. VisualizationElements may be anything from a simple text display to\na multilayered HTML5 canvas.\n\nThe actual server is launched with one or more VisualizationElements;\nit runs the model object through each of them, generating data to be sent to\nthe client. 
The client page is also generated based on the JavaScript code\nprovided by each element.\n\nThis file consists of the following classes:\n\nVisualizationElement: Parent class for all other visualization elements, with\n the minimal necessary options.\nPageHandler: The handler for the visualization page, generated from a template\n and built from the various visualization elements.\nSocketHandler: Handles the websocket connection between the client page and\n the server.\nModularServer: The overall visualization application class which stores and\n controls the model and visualization instance.\n\n\nModularServer should *not* need to be subclassed on a model-by-model basis; it\nshould be primarily a pass-through for VisualizationElement subclasses, which\ndefine the actual visualization specifics.\n\nFor example, suppose we have created two visualization elements for our model,\ncalled canvasvis and graphvis; we would launch a server with:\n\n server = ModularServer(MyModel, [canvasvis, graphvis], name=\"My Model\")\n server.launch()\n\nThe client keeps track of what step it is showing. Clicking the Step button in\nthe browser sends a message requesting the viz_state corresponding to the next\nstep position, which is then sent back to the client via the websocket.\n\nThe websocket protocol is as follows:\nEach message is a JSON object, with a \"type\" property which defines the rest of\nthe structure.\n\nServer -> Client:\n Send over the model state to visualize.\n Model state is a list, with each element corresponding to a div; each div\n is expected to have a render function associated with it, which knows how\n to render that particular data. The example below includes two elements:\n the first is data for a CanvasGrid, the second for a raw text display.\n\n {\n \"type\": \"viz_state\",\n \"data\": [{0:[ {\"Shape\": \"circle\", \"x\": 0, \"y\": 0, \"r\": 0.5,\n \"Color\": \"#AAAAAA\", \"Filled\": \"true\", \"Layer\": 0,\n \"text\": 'A', \"text_color\": \"white\" }]},\n \"Shape Count: 1\"]\n }\n\n Informs the client that the model is over.\n {\"type\": \"end\"}\n\n Informs the client of the current model's parameters\n {\n \"type\": \"model_params\",\n \"params\": 'dict' of model params, (i.e. 
{arg_1: val_1, ...})\n }\n\nClient -> Server:\n Reset the model.\n TODO: Allow this to come with parameters\n {\n \"type\": \"reset\"\n }\n\n Get a given state.\n {\n \"type\": \"get_step\",\n \"step:\" index of the step to get.\n }\n\n Submit model parameter updates\n {\n \"type\": \"submit_params\",\n \"param\": name of model parameter\n \"value\": new value for 'param'\n }\n\n Get the model's parameters\n {\n \"type\": \"get_params\"\n }\n\n\"\"\"\nimport os\nimport tornado.autoreload\nimport tornado.ioloop\nimport tornado.web\nimport tornado.websocket\nimport tornado.escape\nimport tornado.gen\nimport webbrowser\n\nfrom mesa.visualization.UserParam import UserSettableParameter\n\n# Suppress several pylint warnings for this file.\n# Attributes being defined outside of init is a Tornado feature.\n# pylint: disable=attribute-defined-outside-init\n\n\nclass VisualizationElement:\n \"\"\"\n Defines an element of the visualization.\n\n Attributes:\n package_includes: A list of external JavaScript files to include that\n are part of the Mesa packages.\n local_includes: A list of JavaScript files that are local to the\n directory that the server is being run in.\n js_code: A JavaScript code string to instantiate the element.\n\n Methods:\n render: Takes a model object, and produces JSON data which can be sent\n to the client.\n\n \"\"\"\n\n package_includes = []\n local_includes = []\n js_code = ''\n render_args = {}\n\n def __init__(self):\n pass\n\n def render(self, model):\n \"\"\" Build visualization data from a model object.\n\n Args:\n model: A model object\n\n Returns:\n A JSON-ready object.\n\n \"\"\"\n return \"<b>VisualizationElement goes here</b>.\"\n\n# =============================================================================\n# Actual Tornado code starts here:\n\n\nclass PageHandler(tornado.web.RequestHandler):\n \"\"\" Handler for the HTML template which holds the visualization. \"\"\"\n\n def get(self):\n elements = self.application.visualization_elements\n for i, element in enumerate(elements):\n element.index = i\n self.render(\"modular_template.html\", port=self.application.port,\n model_name=self.application.model_name,\n description=self.application.description,\n package_includes=self.application.package_includes,\n local_includes=self.application.local_includes,\n scripts=self.application.js_code)\n\n\nclass SocketHandler(tornado.websocket.WebSocketHandler):\n \"\"\" Handler for websocket. 
\"\"\"\n def open(self):\n if self.application.verbose:\n print(\"Socket opened!\")\n self.write_message({\n \"type\": \"model_params\",\n \"params\": self.application.user_params\n })\n\n def check_origin(self, origin):\n return True\n\n @property\n def viz_state_message(self):\n return {\n \"type\": \"viz_state\",\n \"data\": self.application.render_model()\n }\n\n def on_message(self, message):\n \"\"\" Receiving a message from the websocket, parse, and act accordingly.\n\n \"\"\"\n if self.application.verbose:\n print(message)\n msg = tornado.escape.json_decode(message)\n\n if msg[\"type\"] == \"get_step\":\n if not self.application.model.running:\n self.write_message({\"type\": \"end\"})\n else:\n self.application.model.step()\n self.write_message(self.viz_state_message)\n\n elif msg[\"type\"] == \"reset\":\n self.application.reset_model()\n self.write_message(self.viz_state_message)\n\n elif msg[\"type\"] == \"submit_params\":\n param = msg[\"param\"]\n value = msg[\"value\"]\n\n # Is the param editable?\n if param in self.application.user_params:\n if isinstance(self.application.model_kwargs[param], UserSettableParameter):\n self.application.model_kwargs[param].value = value\n else:\n self.application.model_kwargs[param] = value\n\n else:\n if self.application.verbose:\n print(\"Unexpected message!\")\n\n\nclass ModularServer(tornado.web.Application):\n \"\"\" Main visualization application. \"\"\"\n verbose = True\n\n port = 8521 # Default port to listen on\n max_steps = 100000\n\n # Handlers and other globals:\n page_handler = (r'/', PageHandler)\n socket_handler = (r'/ws', SocketHandler)\n static_handler = (r'/static/(.*)', tornado.web.StaticFileHandler,\n {\"path\": os.path.dirname(__file__) + \"/templates\"})\n local_handler = (r'/local/(.*)', tornado.web.StaticFileHandler,\n {\"path\": ''})\n\n handlers = [page_handler, socket_handler, static_handler, local_handler]\n\n settings = {\"debug\": True,\n \"autoreload\": False,\n \"template_path\": os.path.dirname(__file__) + \"/templates\"}\n\n EXCLUDE_LIST = ('width', 'height',)\n\n def __init__(self, model_cls, visualization_elements, name=\"Mesa Model\",\n model_params={}):\n \"\"\" Create a new visualization server with the given elements. \"\"\"\n # Prep visualization elements:\n self.visualization_elements = visualization_elements\n self.package_includes = set()\n self.local_includes = set()\n self.js_code = []\n for element in self.visualization_elements:\n for include_file in element.package_includes:\n self.package_includes.add(include_file)\n for include_file in element.local_includes:\n self.local_includes.add(include_file)\n self.js_code.append(element.js_code)\n\n # Initializing the model\n self.model_name = name\n self.model_cls = model_cls\n self.description = 'No description available'\n if hasattr(model_cls, 'description'):\n self.description = model_cls.description\n elif model_cls.__doc__ is not None:\n self.description = model_cls.__doc__\n\n self.model_kwargs = model_params\n self.reset_model()\n\n # Initializing the application itself:\n super().__init__(self.handlers, **self.settings)\n\n @property\n def user_params(self):\n result = {}\n for param, val in self.model_kwargs.items():\n if isinstance(val, UserSettableParameter):\n result[param] = val.json\n\n return result\n\n def reset_model(self):\n \"\"\" Reinstantiate the model object, using the current parameters. 
\"\"\"\n\n model_params = {}\n for key, val in self.model_kwargs.items():\n if isinstance(val, UserSettableParameter):\n if val.param_type == 'static_text': # static_text is never used for setting params\n continue\n model_params[key] = val.value\n else:\n model_params[key] = val\n\n self.model = self.model_cls(**model_params)\n\n def render_model(self):\n \"\"\" Turn the current state of the model into a dictionary of\n visualizations\n\n \"\"\"\n visualization_state = []\n for element in self.visualization_elements:\n element_state = element.render(self.model)\n visualization_state.append(element_state)\n return visualization_state\n\n def launch(self, port=None, open_browser=True):\n \"\"\" Run the app. \"\"\"\n if port is not None:\n self.port = port\n url = 'http://127.0.0.1:{PORT}'.format(PORT=self.port)\n print('Interface starting at {url}'.format(url=url))\n self.listen(self.port)\n if open_browser:\n webbrowser.open(url)\n tornado.autoreload.start()\n tornado.ioloop.IOLoop.current().start()\n", "path": "mesa/visualization/ModularVisualization.py"}]}
| 3,463 | 199 |
gh_patches_debug_30860
|
rasdani/github-patches
|
git_diff
|
TheAlgorithms__Python-2032
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Mergesort Update Variable Names
I was looking over the mergesort.py file in the divide_and_conquer directory when I saw that all of the variable names are a single letter and there is not much documentation. Does anyone know enough about this file to improve the variable names and make the code more understandable?
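One possible direction, just to make the ask concrete (the names are only a suggestion, and the tail-copy at the end is my own shorthand rather than anything already in the repo):

```python
def merge(arr, left, mid, right):
    """Merge the sorted runs arr[left:mid + 1] and arr[mid + 1:right + 1] in place."""
    left_run = arr[left:mid + 1]
    right_run = arr[mid + 1:right + 1]
    write = left  # next position in arr to fill
    i = j = 0
    while i < len(left_run) and j < len(right_run):
        if left_run[i] < right_run[j]:  # flip the comparison for descending order
            arr[write] = left_run[i]
            i += 1
        else:
            arr[write] = right_run[j]
            j += 1
        write += 1
    # exactly one run can still have elements left; copy its tail over
    arr[write:right + 1] = left_run[i:] + right_run[j:]
    return arr


if __name__ == "__main__":
    print(merge([2, 5, 9, 1, 4, 8], 0, 2, 5))  # [1, 2, 4, 5, 8, 9]
```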
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `divide_and_conquer/mergesort.py`
Content:
```
1 def merge(a, b, m, e):
2 l = a[b : m + 1] # noqa: E741
3 r = a[m + 1 : e + 1]
4 k = b
5 i = 0
6 j = 0
7 while i < len(l) and j < len(r):
8 # change sign for Descending order
9 if l[i] < r[j]:
10 a[k] = l[i]
11 i += 1
12 else:
13 a[k] = r[j]
14 j += 1
15 k += 1
16 while i < len(l):
17 a[k] = l[i]
18 i += 1
19 k += 1
20 while j < len(r):
21 a[k] = r[j]
22 j += 1
23 k += 1
24 return a
25
26
27 def mergesort(a, b, e):
28 """
29 >>> mergesort([3,2,1],0,2)
30 [1, 2, 3]
31 >>> mergesort([3,2,1,0,1,2,3,5,4],0,8)
32 [0, 1, 1, 2, 2, 3, 3, 4, 5]
33 """
34 if b < e:
35 m = (b + e) // 2
36 # print("ms1",a,b,m)
37 mergesort(a, b, m)
38 # print("ms2",a,m+1,e)
39 mergesort(a, m + 1, e)
40 # print("m",a,b,m,e)
41 merge(a, b, m, e)
42 return a
43
44
45 if __name__ == "__main__":
46 import doctest
47
48 doctest.testmod()
49
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/divide_and_conquer/mergesort.py b/divide_and_conquer/mergesort.py
--- a/divide_and_conquer/mergesort.py
+++ b/divide_and_conquer/mergesort.py
@@ -1,45 +1,48 @@
-def merge(a, b, m, e):
- l = a[b : m + 1] # noqa: E741
- r = a[m + 1 : e + 1]
- k = b
+def merge(arr, left, mid, right):
+ # overall array will divided into 2 array
+ # left_arr contains the left portion of array from left to mid
+ # right_arr contains the right portion of array from mid + 1 to right
+ left_arr = arr[left : mid + 1]
+ right_arr = arr[mid + 1 : right + 1]
+ k = left
i = 0
j = 0
- while i < len(l) and j < len(r):
+ while i < len(left_arr) and j < len(right_arr):
# change sign for Descending order
- if l[i] < r[j]:
- a[k] = l[i]
+ if left_arr[i] < right_arr[j]:
+ arr[k] = left_arr[i]
i += 1
else:
- a[k] = r[j]
+ arr[k] = right_arr[j]
j += 1
k += 1
- while i < len(l):
- a[k] = l[i]
+ while i < len(left_arr):
+ arr[k] = left_arr[i]
i += 1
k += 1
- while j < len(r):
- a[k] = r[j]
+ while j < len(right_arr):
+ arr[k] = right_arr[j]
j += 1
k += 1
- return a
+ return arr
-def mergesort(a, b, e):
+def mergesort(arr, left, right):
"""
- >>> mergesort([3,2,1],0,2)
+ >>> mergesort([3, 2, 1], 0, 2)
[1, 2, 3]
- >>> mergesort([3,2,1,0,1,2,3,5,4],0,8)
+ >>> mergesort([3, 2, 1, 0, 1, 2, 3, 5, 4], 0, 8)
[0, 1, 1, 2, 2, 3, 3, 4, 5]
"""
- if b < e:
- m = (b + e) // 2
+ if left < right:
+ mid = (left + right) // 2
# print("ms1",a,b,m)
- mergesort(a, b, m)
+ mergesort(arr, left, mid)
# print("ms2",a,m+1,e)
- mergesort(a, m + 1, e)
+ mergesort(arr, mid + 1, right)
# print("m",a,b,m,e)
- merge(a, b, m, e)
- return a
+ merge(arr, left, mid, right)
+ return arr
if __name__ == "__main__":
|
{"golden_diff": "diff --git a/divide_and_conquer/mergesort.py b/divide_and_conquer/mergesort.py\n--- a/divide_and_conquer/mergesort.py\n+++ b/divide_and_conquer/mergesort.py\n@@ -1,45 +1,48 @@\n-def merge(a, b, m, e):\n- l = a[b : m + 1] # noqa: E741\n- r = a[m + 1 : e + 1]\n- k = b\n+def merge(arr, left, mid, right):\n+ # overall array will divided into 2 array\n+ # left_arr contains the left portion of array from left to mid\n+ # right_arr contains the right portion of array from mid + 1 to right\n+ left_arr = arr[left : mid + 1]\n+ right_arr = arr[mid + 1 : right + 1]\n+ k = left\n i = 0\n j = 0\n- while i < len(l) and j < len(r):\n+ while i < len(left_arr) and j < len(right_arr):\n # change sign for Descending order\n- if l[i] < r[j]:\n- a[k] = l[i]\n+ if left_arr[i] < right_arr[j]:\n+ arr[k] = left_arr[i]\n i += 1\n else:\n- a[k] = r[j]\n+ arr[k] = right_arr[j]\n j += 1\n k += 1\n- while i < len(l):\n- a[k] = l[i]\n+ while i < len(left_arr):\n+ arr[k] = left_arr[i]\n i += 1\n k += 1\n- while j < len(r):\n- a[k] = r[j]\n+ while j < len(right_arr):\n+ arr[k] = right_arr[j]\n j += 1\n k += 1\n- return a\n+ return arr\n \n \n-def mergesort(a, b, e):\n+def mergesort(arr, left, right):\n \"\"\"\n- >>> mergesort([3,2,1],0,2)\n+ >>> mergesort([3, 2, 1], 0, 2)\n [1, 2, 3]\n- >>> mergesort([3,2,1,0,1,2,3,5,4],0,8)\n+ >>> mergesort([3, 2, 1, 0, 1, 2, 3, 5, 4], 0, 8)\n [0, 1, 1, 2, 2, 3, 3, 4, 5]\n \"\"\"\n- if b < e:\n- m = (b + e) // 2\n+ if left < right:\n+ mid = (left + right) // 2\n # print(\"ms1\",a,b,m)\n- mergesort(a, b, m)\n+ mergesort(arr, left, mid)\n # print(\"ms2\",a,m+1,e)\n- mergesort(a, m + 1, e)\n+ mergesort(arr, mid + 1, right)\n # print(\"m\",a,b,m,e)\n- merge(a, b, m, e)\n- return a\n+ merge(arr, left, mid, right)\n+ return arr\n \n \n if __name__ == \"__main__\":\n", "issue": "Mergesort Update Variable Names\nI was looking over the mergesort.py file in the divide_and_conquer directory when I saw that all of the variable names are a single letter and there is not much documentation. 
Does anyone know enough about this file to improve the variable names and make the code more understandable?\n", "before_files": [{"content": "def merge(a, b, m, e):\n l = a[b : m + 1] # noqa: E741\n r = a[m + 1 : e + 1]\n k = b\n i = 0\n j = 0\n while i < len(l) and j < len(r):\n # change sign for Descending order\n if l[i] < r[j]:\n a[k] = l[i]\n i += 1\n else:\n a[k] = r[j]\n j += 1\n k += 1\n while i < len(l):\n a[k] = l[i]\n i += 1\n k += 1\n while j < len(r):\n a[k] = r[j]\n j += 1\n k += 1\n return a\n\n\ndef mergesort(a, b, e):\n \"\"\"\n >>> mergesort([3,2,1],0,2)\n [1, 2, 3]\n >>> mergesort([3,2,1,0,1,2,3,5,4],0,8)\n [0, 1, 1, 2, 2, 3, 3, 4, 5]\n \"\"\"\n if b < e:\n m = (b + e) // 2\n # print(\"ms1\",a,b,m)\n mergesort(a, b, m)\n # print(\"ms2\",a,m+1,e)\n mergesort(a, m + 1, e)\n # print(\"m\",a,b,m,e)\n merge(a, b, m, e)\n return a\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n", "path": "divide_and_conquer/mergesort.py"}], "after_files": [{"content": "def merge(arr, left, mid, right):\n # overall array will divided into 2 array\n # left_arr contains the left portion of array from left to mid\n # right_arr contains the right portion of array from mid + 1 to right\n left_arr = arr[left : mid + 1]\n right_arr = arr[mid + 1 : right + 1]\n k = left\n i = 0\n j = 0\n while i < len(left_arr) and j < len(right_arr):\n # change sign for Descending order\n if left_arr[i] < right_arr[j]:\n arr[k] = left_arr[i]\n i += 1\n else:\n arr[k] = right_arr[j]\n j += 1\n k += 1\n while i < len(left_arr):\n arr[k] = left_arr[i]\n i += 1\n k += 1\n while j < len(right_arr):\n arr[k] = right_arr[j]\n j += 1\n k += 1\n return arr\n\n\ndef mergesort(arr, left, right):\n \"\"\"\n >>> mergesort([3, 2, 1], 0, 2)\n [1, 2, 3]\n >>> mergesort([3, 2, 1, 0, 1, 2, 3, 5, 4], 0, 8)\n [0, 1, 1, 2, 2, 3, 3, 4, 5]\n \"\"\"\n if left < right:\n mid = (left + right) // 2\n # print(\"ms1\",a,b,m)\n mergesort(arr, left, mid)\n # print(\"ms2\",a,m+1,e)\n mergesort(arr, mid + 1, right)\n # print(\"m\",a,b,m,e)\n merge(arr, left, mid, right)\n return arr\n\n\nif __name__ == \"__main__\":\n import doctest\n\n doctest.testmod()\n", "path": "divide_and_conquer/mergesort.py"}]}
| 807 | 773 |
gh_patches_debug_34494
|
rasdani/github-patches
|
git_diff
|
techmatters__terraso-backend-238
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot delete users who have uploaded shared files
## Description
Attempting to delete a user who has uploaded files will give an error like so
```
Cannot delete user
Deleting the selected user would require deleting the following protected related objects:
Data entry: acBie9x4 WieezMsPbKL4P2
Data entry: KoBo question set
Data entry: myfile
Data entry: plus+sign+cool
Data entry: acBie9x4WieezMsPbKL4P2
```
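For background (my reading of the error, based on the models shown further down where `DataEntry.created_by` uses `on_delete=models.PROTECT`; the snippet below is a generic, self-contained Django illustration rather than Terraso code):

```python
from django.db import models
from django.db.models.deletion import ProtectedError


class Uploader(models.Model):
    name = models.CharField(max_length=64)

    class Meta:
        app_label = "demo"  # hypothetical app so the sketch stands alone


class Upload(models.Model):
    title = models.CharField(max_length=64)
    # mirrors DataEntry.created_by in the real models
    created_by = models.ForeignKey(Uploader, on_delete=models.PROTECT)

    class Meta:
        app_label = "demo"


def delete_uploader(uploader):
    """A hard delete is refused while any Upload still points at the user."""
    try:
        uploader.delete()
    except ProtectedError as err:
        # these are the rows the admin lists as "protected related objects"
        return list(err.protected_objects)
    return []
```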
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `terraso_backend/apps/core/models/users.py`
Content:
```
1 import uuid
2
3 from django.contrib.auth.models import AbstractUser, BaseUserManager
4 from django.db import models
5 from safedelete.models import SOFT_DELETE_CASCADE, SafeDeleteManager, SafeDeleteModel
6
7
8 class UserManager(SafeDeleteManager, BaseUserManager):
9 use_in_migrations = True
10
11 def _create_user(self, email, password, **extra_fields):
12 """Create and save a User with the given email and password."""
13 if not email:
14 raise ValueError("The given email must be set")
15
16 email = self.normalize_email(email)
17 user = self.model(email=email, **extra_fields)
18 user.set_password(password)
19 user.save(using=self._db)
20
21 return user
22
23 def create_user(self, email, password=None, **extra_fields):
24 """Create and save a regular User with the given email and password."""
25 extra_fields.setdefault("is_staff", False)
26 extra_fields.setdefault("is_superuser", False)
27 return self._create_user(email, password, **extra_fields)
28
29 def create_superuser(self, email, password, **extra_fields):
30 """Create and save a SuperUser with the given email and password."""
31 extra_fields.setdefault("is_staff", True)
32 extra_fields.setdefault("is_superuser", True)
33
34 if extra_fields.get("is_staff") is not True:
35 raise ValueError("Superuser must have is_staff=True.")
36 if extra_fields.get("is_superuser") is not True:
37 raise ValueError("Superuser must have is_superuser=True.")
38
39 return self._create_user(email, password, **extra_fields)
40
41
42 class User(SafeDeleteModel, AbstractUser):
43 """This model represents a User on Terraso platform."""
44
45 fields_to_trim = ["first_name", "last_name"]
46
47 _safedelete_policy = SOFT_DELETE_CASCADE
48
49 id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
50 created_at = models.DateTimeField(auto_now_add=True)
51 updated_at = models.DateTimeField(auto_now=True)
52
53 username = None
54 email = models.EmailField()
55 profile_image = models.URLField(blank=True, default="")
56
57 USERNAME_FIELD = "email"
58 REQUIRED_FIELDS = []
59
60 objects = UserManager()
61
62 class Meta:
63 get_latest_by = "created_at"
64 ordering = ["-created_at"]
65 constraints = (
66 models.UniqueConstraint(
67 fields=("email",),
68 condition=models.Q(deleted_at__isnull=True),
69 name="unique_active_email",
70 ),
71 )
72
73 def save(self, *args, **kwargs):
74 for field in self.fields_to_trim:
75 setattr(self, field, getattr(self, field).strip())
76 return super().save(*args, **kwargs)
77
78 def is_landscape_manager(self, landscape_id):
79 return (
80 self.memberships.managers_only()
81 .filter(
82 group__associated_landscapes__is_default_landscape_group=True,
83 group__associated_landscapes__landscape__pk=landscape_id,
84 )
85 .exists()
86 )
87
88 def is_group_manager(self, group_id):
89 return self.memberships.managers_only().filter(group__pk=group_id).exists()
90
91 def __str__(self):
92 return self.email
93
94
95 class UserPreference(models.Model):
96 id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
97 created_at = models.DateTimeField(auto_now_add=True)
98 updated_at = models.DateTimeField(auto_now=True)
99 key = models.CharField(max_length=128)
100 value = models.CharField(max_length=512, blank=True, default="")
101
102 user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="preferences")
103
104 class Meta:
105 constraints = (
106 models.UniqueConstraint(
107 fields=("key", "user"),
108 name="unique_user_preference",
109 ),
110 )
111
```
Path: `terraso_backend/apps/shared_data/models/data_entries.py`
Content:
```
1 from django.db import models
2 from django.utils import timezone
3 from django.utils.translation import gettext_lazy as _
4
5 from apps.core.models import BaseModel, Group, User
6 from apps.shared_data import permission_rules as perm_rules
7 from apps.shared_data.services import DataEntryFileStorage
8
9
10 class DataEntry(BaseModel):
11 """
12 Data Entry stores information about resources (usually files) that contain
13 different kind of data used by Landscape managers. Common resource types are
14 csv, xls and JSON files.
15
16 A Data Entry can point to internal or external resources. An internal
17 resource is stored on Terraso's infrastructure and an external resource is
18 stored out of the Terraso's infrastructure. In both cases, the Data Entry
19 only has the URL for that resource as a link to it.
20
21 Attributes
22 ----------
23 name: str
24 any user given name for that resource
25 description: str
26 a longer description explaining the resource
27 resource_type: str
28 the 'technical' type of the resource, usually the mime type
29 url: str
30 the URL where the resource can be accessed
31
32 groups: ManyToManyField(Group)
33 Groups where the resource is linked to (shared)
34 created_by: User
35 User who created the resource
36 """
37
38 name = models.CharField(max_length=128)
39 description = models.TextField(blank=True, default="")
40
41 ENTRY_TYPE_FILE = "file"
42 ENTRY_TYPE_LINK = "link"
43 MEMBERSHIP_TYPES = (
44 (ENTRY_TYPE_FILE, _("File")),
45 (ENTRY_TYPE_LINK, _("Link")),
46 )
47 entry_type = models.CharField(
48 max_length=32,
49 choices=MEMBERSHIP_TYPES,
50 )
51
52 resource_type = models.CharField(max_length=255, blank=True, default="")
53 url = models.URLField()
54 size = models.PositiveBigIntegerField(null=True, blank=True)
55
56 groups = models.ManyToManyField(Group, related_name="data_entries")
57 created_by = models.ForeignKey(User, on_delete=models.PROTECT)
58 file_removed_at = models.DateTimeField(blank=True, null=True)
59
60 class Meta(BaseModel.Meta):
61 verbose_name_plural = "Data Entries"
62 rules_permissions = {
63 "change": perm_rules.allowed_to_change_data_entry,
64 "delete": perm_rules.allowed_to_delete_data_entry,
65 "view": perm_rules.allowed_to_view_data_entry,
66 }
67
68 @property
69 def s3_object_name(self):
70 object_name = "/".join(self.url.split("/")[-2:]) if self.url else ""
71
72 # We want to put back the space character so the sign url works properly
73 object_name = object_name.replace("%20", " ")
74 return object_name
75
76 @property
77 def signed_url(self):
78 storage = DataEntryFileStorage(custom_domain=None)
79 return storage.url(self.s3_object_name)
80
81 def delete_file_on_storage(self):
82 if not self.deleted_at:
83 raise RuntimeError(
84 f"Storage object cannot be deleted if its DataEntry ({self.id}) is not deleted."
85 )
86
87 if self.file_removed_at:
88 return
89
90 storage = DataEntryFileStorage(custom_domain=None)
91 storage.delete(self.s3_object_name)
92 self.file_removed_at = timezone.now()
93 self.save(keep_deleted=True)
94
95 def to_dict(self):
96 return dict(
97 id=str(self.id),
98 name=self.name,
99 entry_type=self.entry_type,
100 description=self.description,
101 url=self.signed_url,
102 resource_type=self.resource_type,
103 size=self.size,
104 created_by=str(self.created_by.id),
105 groups=[str(group.id) for group in self.groups.all()],
106 )
107
108 def __str__(self):
109 return self.name
110
111 @classmethod
112 def get_entry_type_from_text(cls, entry_type):
113 if entry_type and entry_type.lower() == cls.ENTRY_TYPE_FILE:
114 return cls.ENTRY_TYPE_FILE
115 return cls.ENTRY_TYPE_LINK
116
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/terraso_backend/apps/core/models/users.py b/terraso_backend/apps/core/models/users.py
--- a/terraso_backend/apps/core/models/users.py
+++ b/terraso_backend/apps/core/models/users.py
@@ -85,6 +85,17 @@
.exists()
)
+ def soft_delete_policy_action(self, **kwargs):
+ """Relink files to deleted user. The default policy is to set the `created_by` field to
+ null if the user is deleted. However, for a soft deletion we want to keep this link. That
+ way if the user is restored, the created_by is still pointing to the same place."""
+ linked_dataentries = self.dataentry_set.all()
+ delete_response = super().soft_delete_policy_action()
+ for entry in linked_dataentries:
+ entry.created_by = self
+ entry.save()
+ return delete_response
+
def is_group_manager(self, group_id):
return self.memberships.managers_only().filter(group__pk=group_id).exists()
diff --git a/terraso_backend/apps/shared_data/models/data_entries.py b/terraso_backend/apps/shared_data/models/data_entries.py
--- a/terraso_backend/apps/shared_data/models/data_entries.py
+++ b/terraso_backend/apps/shared_data/models/data_entries.py
@@ -1,6 +1,7 @@
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _
+from safedelete.models import SOFT_DELETE
from apps.core.models import BaseModel, Group, User
from apps.shared_data import permission_rules as perm_rules
@@ -35,6 +36,9 @@
User who created the resource
"""
+ # file will not be deleted in cascade
+ _safedelete_policy = SOFT_DELETE
+
name = models.CharField(max_length=128)
description = models.TextField(blank=True, default="")
@@ -54,7 +58,7 @@
size = models.PositiveBigIntegerField(null=True, blank=True)
groups = models.ManyToManyField(Group, related_name="data_entries")
- created_by = models.ForeignKey(User, on_delete=models.PROTECT)
+ created_by = models.ForeignKey(User, null=True, on_delete=models.DO_NOTHING)
file_removed_at = models.DateTimeField(blank=True, null=True)
class Meta(BaseModel.Meta):
|
{"golden_diff": "diff --git a/terraso_backend/apps/core/models/users.py b/terraso_backend/apps/core/models/users.py\n--- a/terraso_backend/apps/core/models/users.py\n+++ b/terraso_backend/apps/core/models/users.py\n@@ -85,6 +85,17 @@\n .exists()\n )\n \n+ def soft_delete_policy_action(self, **kwargs):\n+ \"\"\"Relink files to deleted user. The default policy is to set the `created_by` field to\n+ null if the user is deleted. However, for a soft deletion we want to keep this link. That\n+ way if the user is restored, the created_by is still pointing to the same place.\"\"\"\n+ linked_dataentries = self.dataentry_set.all()\n+ delete_response = super().soft_delete_policy_action()\n+ for entry in linked_dataentries:\n+ entry.created_by = self\n+ entry.save()\n+ return delete_response\n+\n def is_group_manager(self, group_id):\n return self.memberships.managers_only().filter(group__pk=group_id).exists()\n \ndiff --git a/terraso_backend/apps/shared_data/models/data_entries.py b/terraso_backend/apps/shared_data/models/data_entries.py\n--- a/terraso_backend/apps/shared_data/models/data_entries.py\n+++ b/terraso_backend/apps/shared_data/models/data_entries.py\n@@ -1,6 +1,7 @@\n from django.db import models\n from django.utils import timezone\n from django.utils.translation import gettext_lazy as _\n+from safedelete.models import SOFT_DELETE\n \n from apps.core.models import BaseModel, Group, User\n from apps.shared_data import permission_rules as perm_rules\n@@ -35,6 +36,9 @@\n User who created the resource\n \"\"\"\n \n+ # file will not be deleted in cascade\n+ _safedelete_policy = SOFT_DELETE\n+\n name = models.CharField(max_length=128)\n description = models.TextField(blank=True, default=\"\")\n \n@@ -54,7 +58,7 @@\n size = models.PositiveBigIntegerField(null=True, blank=True)\n \n groups = models.ManyToManyField(Group, related_name=\"data_entries\")\n- created_by = models.ForeignKey(User, on_delete=models.PROTECT)\n+ created_by = models.ForeignKey(User, null=True, on_delete=models.DO_NOTHING)\n file_removed_at = models.DateTimeField(blank=True, null=True)\n \n class Meta(BaseModel.Meta):\n", "issue": "Cannot delete users who have uploaded shared files\n## Description\r\nAttempting to delete a user who has uploaded files will give an error like so\r\n\r\n```\r\nCannot delete user\r\nDeleting the selected user would require deleting the following protected related objects:\r\n\r\nData entry: acBie9x4 WieezMsPbKL4P2\r\nData entry: KoBo question set\r\nData entry: myfile\r\nData entry: plus+sign+cool\r\nData entry: acBie9x4WieezMsPbKL4P2\r\n\r\n```\r\n\n", "before_files": [{"content": "import uuid\n\nfrom django.contrib.auth.models import AbstractUser, BaseUserManager\nfrom django.db import models\nfrom safedelete.models import SOFT_DELETE_CASCADE, SafeDeleteManager, SafeDeleteModel\n\n\nclass UserManager(SafeDeleteManager, BaseUserManager):\n use_in_migrations = True\n\n def _create_user(self, email, password, **extra_fields):\n \"\"\"Create and save a User with the given email and password.\"\"\"\n if not email:\n raise ValueError(\"The given email must be set\")\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user\n\n def create_user(self, email, password=None, **extra_fields):\n \"\"\"Create and save a regular User with the given email and password.\"\"\"\n extra_fields.setdefault(\"is_staff\", False)\n extra_fields.setdefault(\"is_superuser\", False)\n return self._create_user(email, 
password, **extra_fields)\n\n def create_superuser(self, email, password, **extra_fields):\n \"\"\"Create and save a SuperUser with the given email and password.\"\"\"\n extra_fields.setdefault(\"is_staff\", True)\n extra_fields.setdefault(\"is_superuser\", True)\n\n if extra_fields.get(\"is_staff\") is not True:\n raise ValueError(\"Superuser must have is_staff=True.\")\n if extra_fields.get(\"is_superuser\") is not True:\n raise ValueError(\"Superuser must have is_superuser=True.\")\n\n return self._create_user(email, password, **extra_fields)\n\n\nclass User(SafeDeleteModel, AbstractUser):\n \"\"\"This model represents a User on Terraso platform.\"\"\"\n\n fields_to_trim = [\"first_name\", \"last_name\"]\n\n _safedelete_policy = SOFT_DELETE_CASCADE\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n username = None\n email = models.EmailField()\n profile_image = models.URLField(blank=True, default=\"\")\n\n USERNAME_FIELD = \"email\"\n REQUIRED_FIELDS = []\n\n objects = UserManager()\n\n class Meta:\n get_latest_by = \"created_at\"\n ordering = [\"-created_at\"]\n constraints = (\n models.UniqueConstraint(\n fields=(\"email\",),\n condition=models.Q(deleted_at__isnull=True),\n name=\"unique_active_email\",\n ),\n )\n\n def save(self, *args, **kwargs):\n for field in self.fields_to_trim:\n setattr(self, field, getattr(self, field).strip())\n return super().save(*args, **kwargs)\n\n def is_landscape_manager(self, landscape_id):\n return (\n self.memberships.managers_only()\n .filter(\n group__associated_landscapes__is_default_landscape_group=True,\n group__associated_landscapes__landscape__pk=landscape_id,\n )\n .exists()\n )\n\n def is_group_manager(self, group_id):\n return self.memberships.managers_only().filter(group__pk=group_id).exists()\n\n def __str__(self):\n return self.email\n\n\nclass UserPreference(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n key = models.CharField(max_length=128)\n value = models.CharField(max_length=512, blank=True, default=\"\")\n\n user = models.ForeignKey(User, on_delete=models.CASCADE, related_name=\"preferences\")\n\n class Meta:\n constraints = (\n models.UniqueConstraint(\n fields=(\"key\", \"user\"),\n name=\"unique_user_preference\",\n ),\n )\n", "path": "terraso_backend/apps/core/models/users.py"}, {"content": "from django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\n\nfrom apps.core.models import BaseModel, Group, User\nfrom apps.shared_data import permission_rules as perm_rules\nfrom apps.shared_data.services import DataEntryFileStorage\n\n\nclass DataEntry(BaseModel):\n \"\"\"\n Data Entry stores information about resources (usually files) that contain\n different kind of data used by Landscape managers. Common resource types are\n csv, xls and JSON files.\n\n A Data Entry can point to internal or external resources. An internal\n resource is stored on Terraso's infrastructure and an external resource is\n stored out of the Terraso's infrastructure. 
In both cases, the Data Entry\n only has the URL for that resource as a link to it.\n\n Attributes\n ----------\n name: str\n any user given name for that resource\n description: str\n a longer description explaining the resource\n resource_type: str\n the 'technical' type of the resource, usually the mime type\n url: str\n the URL where the resource can be accessed\n\n groups: ManyToManyField(Group)\n Groups where the resource is linked to (shared)\n created_by: User\n User who created the resource\n \"\"\"\n\n name = models.CharField(max_length=128)\n description = models.TextField(blank=True, default=\"\")\n\n ENTRY_TYPE_FILE = \"file\"\n ENTRY_TYPE_LINK = \"link\"\n MEMBERSHIP_TYPES = (\n (ENTRY_TYPE_FILE, _(\"File\")),\n (ENTRY_TYPE_LINK, _(\"Link\")),\n )\n entry_type = models.CharField(\n max_length=32,\n choices=MEMBERSHIP_TYPES,\n )\n\n resource_type = models.CharField(max_length=255, blank=True, default=\"\")\n url = models.URLField()\n size = models.PositiveBigIntegerField(null=True, blank=True)\n\n groups = models.ManyToManyField(Group, related_name=\"data_entries\")\n created_by = models.ForeignKey(User, on_delete=models.PROTECT)\n file_removed_at = models.DateTimeField(blank=True, null=True)\n\n class Meta(BaseModel.Meta):\n verbose_name_plural = \"Data Entries\"\n rules_permissions = {\n \"change\": perm_rules.allowed_to_change_data_entry,\n \"delete\": perm_rules.allowed_to_delete_data_entry,\n \"view\": perm_rules.allowed_to_view_data_entry,\n }\n\n @property\n def s3_object_name(self):\n object_name = \"/\".join(self.url.split(\"/\")[-2:]) if self.url else \"\"\n\n # We want to put back the space character so the sign url works properly\n object_name = object_name.replace(\"%20\", \" \")\n return object_name\n\n @property\n def signed_url(self):\n storage = DataEntryFileStorage(custom_domain=None)\n return storage.url(self.s3_object_name)\n\n def delete_file_on_storage(self):\n if not self.deleted_at:\n raise RuntimeError(\n f\"Storage object cannot be deleted if its DataEntry ({self.id}) is not deleted.\"\n )\n\n if self.file_removed_at:\n return\n\n storage = DataEntryFileStorage(custom_domain=None)\n storage.delete(self.s3_object_name)\n self.file_removed_at = timezone.now()\n self.save(keep_deleted=True)\n\n def to_dict(self):\n return dict(\n id=str(self.id),\n name=self.name,\n entry_type=self.entry_type,\n description=self.description,\n url=self.signed_url,\n resource_type=self.resource_type,\n size=self.size,\n created_by=str(self.created_by.id),\n groups=[str(group.id) for group in self.groups.all()],\n )\n\n def __str__(self):\n return self.name\n\n @classmethod\n def get_entry_type_from_text(cls, entry_type):\n if entry_type and entry_type.lower() == cls.ENTRY_TYPE_FILE:\n return cls.ENTRY_TYPE_FILE\n return cls.ENTRY_TYPE_LINK\n", "path": "terraso_backend/apps/shared_data/models/data_entries.py"}], "after_files": [{"content": "import uuid\n\nfrom django.contrib.auth.models import AbstractUser, BaseUserManager\nfrom django.db import models\nfrom safedelete.models import SOFT_DELETE_CASCADE, SafeDeleteManager, SafeDeleteModel\n\n\nclass UserManager(SafeDeleteManager, BaseUserManager):\n use_in_migrations = True\n\n def _create_user(self, email, password, **extra_fields):\n \"\"\"Create and save a User with the given email and password.\"\"\"\n if not email:\n raise ValueError(\"The given email must be set\")\n\n email = self.normalize_email(email)\n user = self.model(email=email, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n 
return user\n\n def create_user(self, email, password=None, **extra_fields):\n \"\"\"Create and save a regular User with the given email and password.\"\"\"\n extra_fields.setdefault(\"is_staff\", False)\n extra_fields.setdefault(\"is_superuser\", False)\n return self._create_user(email, password, **extra_fields)\n\n def create_superuser(self, email, password, **extra_fields):\n \"\"\"Create and save a SuperUser with the given email and password.\"\"\"\n extra_fields.setdefault(\"is_staff\", True)\n extra_fields.setdefault(\"is_superuser\", True)\n\n if extra_fields.get(\"is_staff\") is not True:\n raise ValueError(\"Superuser must have is_staff=True.\")\n if extra_fields.get(\"is_superuser\") is not True:\n raise ValueError(\"Superuser must have is_superuser=True.\")\n\n return self._create_user(email, password, **extra_fields)\n\n\nclass User(SafeDeleteModel, AbstractUser):\n \"\"\"This model represents a User on Terraso platform.\"\"\"\n\n fields_to_trim = [\"first_name\", \"last_name\"]\n\n _safedelete_policy = SOFT_DELETE_CASCADE\n\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n username = None\n email = models.EmailField()\n profile_image = models.URLField(blank=True, default=\"\")\n\n USERNAME_FIELD = \"email\"\n REQUIRED_FIELDS = []\n\n objects = UserManager()\n\n class Meta:\n get_latest_by = \"created_at\"\n ordering = [\"-created_at\"]\n constraints = (\n models.UniqueConstraint(\n fields=(\"email\",),\n condition=models.Q(deleted_at__isnull=True),\n name=\"unique_active_email\",\n ),\n )\n\n def save(self, *args, **kwargs):\n for field in self.fields_to_trim:\n setattr(self, field, getattr(self, field).strip())\n return super().save(*args, **kwargs)\n\n def is_landscape_manager(self, landscape_id):\n return (\n self.memberships.managers_only()\n .filter(\n group__associated_landscapes__is_default_landscape_group=True,\n group__associated_landscapes__landscape__pk=landscape_id,\n )\n .exists()\n )\n\n def soft_delete_policy_action(self, **kwargs):\n \"\"\"Relink files to deleted user. The default policy is to set the `created_by` field to\n null if the user is deleted. However, for a soft deletion we want to keep this link. 
That\n way if the user is restored, the created_by is still pointing to the same place.\"\"\"\n linked_dataentries = self.dataentry_set.all()\n delete_response = super().soft_delete_policy_action()\n for entry in linked_dataentries:\n entry.created_by = self\n entry.save()\n return delete_response\n\n def is_group_manager(self, group_id):\n return self.memberships.managers_only().filter(group__pk=group_id).exists()\n\n def __str__(self):\n return self.email\n\n\nclass UserPreference(models.Model):\n id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n key = models.CharField(max_length=128)\n value = models.CharField(max_length=512, blank=True, default=\"\")\n\n user = models.ForeignKey(User, on_delete=models.CASCADE, related_name=\"preferences\")\n\n class Meta:\n constraints = (\n models.UniqueConstraint(\n fields=(\"key\", \"user\"),\n name=\"unique_user_preference\",\n ),\n )\n", "path": "terraso_backend/apps/core/models/users.py"}, {"content": "from django.db import models\nfrom django.utils import timezone\nfrom django.utils.translation import gettext_lazy as _\nfrom safedelete.models import SOFT_DELETE\n\nfrom apps.core.models import BaseModel, Group, User\nfrom apps.shared_data import permission_rules as perm_rules\nfrom apps.shared_data.services import DataEntryFileStorage\n\n\nclass DataEntry(BaseModel):\n \"\"\"\n Data Entry stores information about resources (usually files) that contain\n different kind of data used by Landscape managers. Common resource types are\n csv, xls and JSON files.\n\n A Data Entry can point to internal or external resources. An internal\n resource is stored on Terraso's infrastructure and an external resource is\n stored out of the Terraso's infrastructure. 
In both cases, the Data Entry\n only has the URL for that resource as a link to it.\n\n Attributes\n ----------\n name: str\n any user given name for that resource\n description: str\n a longer description explaining the resource\n resource_type: str\n the 'technical' type of the resource, usually the mime type\n url: str\n the URL where the resource can be accessed\n\n groups: ManyToManyField(Group)\n Groups where the resource is linked to (shared)\n created_by: User\n User who created the resource\n \"\"\"\n\n # file will not be deleted in cascade\n _safedelete_policy = SOFT_DELETE\n\n name = models.CharField(max_length=128)\n description = models.TextField(blank=True, default=\"\")\n\n ENTRY_TYPE_FILE = \"file\"\n ENTRY_TYPE_LINK = \"link\"\n MEMBERSHIP_TYPES = (\n (ENTRY_TYPE_FILE, _(\"File\")),\n (ENTRY_TYPE_LINK, _(\"Link\")),\n )\n entry_type = models.CharField(\n max_length=32,\n choices=MEMBERSHIP_TYPES,\n )\n\n resource_type = models.CharField(max_length=255, blank=True, default=\"\")\n url = models.URLField()\n size = models.PositiveBigIntegerField(null=True, blank=True)\n\n groups = models.ManyToManyField(Group, related_name=\"data_entries\")\n created_by = models.ForeignKey(User, null=True, on_delete=models.DO_NOTHING)\n file_removed_at = models.DateTimeField(blank=True, null=True)\n\n class Meta(BaseModel.Meta):\n verbose_name_plural = \"Data Entries\"\n rules_permissions = {\n \"change\": perm_rules.allowed_to_change_data_entry,\n \"delete\": perm_rules.allowed_to_delete_data_entry,\n \"view\": perm_rules.allowed_to_view_data_entry,\n }\n\n @property\n def s3_object_name(self):\n object_name = \"/\".join(self.url.split(\"/\")[-2:]) if self.url else \"\"\n\n # We want to put back the space character so the sign url works properly\n object_name = object_name.replace(\"%20\", \" \")\n return object_name\n\n @property\n def signed_url(self):\n storage = DataEntryFileStorage(custom_domain=None)\n return storage.url(self.s3_object_name)\n\n def delete_file_on_storage(self):\n if not self.deleted_at:\n raise RuntimeError(\n f\"Storage object cannot be deleted if its DataEntry ({self.id}) is not deleted.\"\n )\n\n if self.file_removed_at:\n return\n\n storage = DataEntryFileStorage(custom_domain=None)\n storage.delete(self.s3_object_name)\n self.file_removed_at = timezone.now()\n self.save(keep_deleted=True)\n\n def to_dict(self):\n return dict(\n id=str(self.id),\n name=self.name,\n entry_type=self.entry_type,\n description=self.description,\n url=self.signed_url,\n resource_type=self.resource_type,\n size=self.size,\n created_by=str(self.created_by.id),\n groups=[str(group.id) for group in self.groups.all()],\n )\n\n def __str__(self):\n return self.name\n\n @classmethod\n def get_entry_type_from_text(cls, entry_type):\n if entry_type and entry_type.lower() == cls.ENTRY_TYPE_FILE:\n return cls.ENTRY_TYPE_FILE\n return cls.ENTRY_TYPE_LINK\n", "path": "terraso_backend/apps/shared_data/models/data_entries.py"}]}
| 2,501 | 523 |
gh_patches_debug_13420
|
rasdani/github-patches
|
git_diff
|
comic__grand-challenge.org-786
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Partial update not working correctly because of custom validation
The validation rule in `app/grandchallenge/annotations/serializers.py:31` is breaking the partial update functionality.
If you try to do a partial update PATCH request to the endpoint, it will try to find the `annotation_set` attribute in the request data. If this is not present it will throw a KeyError.
This should be fixed by first checking if the key exists in the request data and only then running the validation check. The validation check is not needed if the key does not exist because it will then either not change (for partial update request) or throw a `field is required` validation error (for every other type of request).
I will fix this and add a test for it.
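Roughly what I have in mind for the fix (only a sketch against the serializer shown below, not the final patch):

```python
def validate(self, data):
    # On a partial update "annotation_set" may legitimately be absent, so skip
    # the grader check; any other request type will still get a normal
    # "this field is required" error from the serializer itself.
    if data.get("annotation_set") is None:
        return data
    validate_grader_is_current_retina_user(
        data["annotation_set"].grader, self.context
    )
    return data
```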
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `app/grandchallenge/annotations/serializers.py`
Content:
```
1 from rest_framework import serializers
2
3 from .models import (
4 ETDRSGridAnnotation,
5 MeasurementAnnotation,
6 BooleanClassificationAnnotation,
7 PolygonAnnotationSet,
8 SinglePolygonAnnotation,
9 LandmarkAnnotationSet,
10 SingleLandmarkAnnotation,
11 )
12 from .validators import validate_grader_is_current_retina_user
13
14
15 class AbstractAnnotationSerializer(serializers.ModelSerializer):
16 def validate_grader(self, value):
17 """
18 Validate that grader is the user creating the object for retina_graders group
19 """
20 validate_grader_is_current_retina_user(value, self.context)
21 return value
22
23 class Meta:
24 abstract = True
25
26
27 class AbstractSingleAnnotationSerializer(serializers.ModelSerializer):
28 def validate(self, data):
29 """
30 Validate that the user that is creating this object equals the annotation_set.grader for retina_graders
31 """
32 validate_grader_is_current_retina_user(
33 data["annotation_set"].grader, self.context
34 )
35 return data
36
37 class Meta:
38 abstract = True
39
40
41 class ETDRSGridAnnotationSerializer(AbstractAnnotationSerializer):
42 class Meta:
43 model = ETDRSGridAnnotation
44 fields = ("grader", "created", "image", "fovea", "optic_disk")
45
46
47 class MeasurementAnnotationSerializer(AbstractAnnotationSerializer):
48 class Meta:
49 model = MeasurementAnnotation
50 fields = ("image", "grader", "created", "start_voxel", "end_voxel")
51
52
53 class BooleanClassificationAnnotationSerializer(AbstractAnnotationSerializer):
54 class Meta:
55 model = BooleanClassificationAnnotation
56 fields = ("image", "grader", "created", "name", "value")
57
58
59 class SinglePolygonAnnotationSerializer(AbstractSingleAnnotationSerializer):
60 annotation_set = serializers.PrimaryKeyRelatedField(
61 queryset=PolygonAnnotationSet.objects.all()
62 )
63
64 class Meta:
65 model = SinglePolygonAnnotation
66 fields = ("id", "value", "annotation_set")
67
68
69 class PolygonAnnotationSetSerializer(AbstractAnnotationSerializer):
70 singlepolygonannotation_set = SinglePolygonAnnotationSerializer(
71 many=True, read_only=True
72 )
73
74 class Meta:
75 model = PolygonAnnotationSet
76 fields = (
77 "id",
78 "image",
79 "grader",
80 "created",
81 "name",
82 "singlepolygonannotation_set",
83 )
84
85
86 class LandmarkAnnotationSetSerializer(AbstractAnnotationSerializer):
87 class Meta:
88 model = LandmarkAnnotationSet
89 fields = ("grader", "created")
90
91
92 class SingleLandmarkAnnotationSerializer(AbstractSingleAnnotationSerializer):
93 class Meta:
94 model = SingleLandmarkAnnotation
95 fields = ("image", "annotation_set", "landmarks")
96
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/app/grandchallenge/annotations/serializers.py b/app/grandchallenge/annotations/serializers.py
--- a/app/grandchallenge/annotations/serializers.py
+++ b/app/grandchallenge/annotations/serializers.py
@@ -27,11 +27,14 @@
class AbstractSingleAnnotationSerializer(serializers.ModelSerializer):
def validate(self, data):
"""
- Validate that the user that is creating this object equals the annotation_set.grader for retina_graders
+ Validate that the user that is creating this object equals the
+ annotation_set.grader for retina_graders
"""
- validate_grader_is_current_retina_user(
- data["annotation_set"].grader, self.context
- )
+ if data.get("annotation_set") is None:
+ return data
+
+ grader = data["annotation_set"].grader
+ validate_grader_is_current_retina_user(grader, self.context)
return data
class Meta:
|
{"golden_diff": "diff --git a/app/grandchallenge/annotations/serializers.py b/app/grandchallenge/annotations/serializers.py\n--- a/app/grandchallenge/annotations/serializers.py\n+++ b/app/grandchallenge/annotations/serializers.py\n@@ -27,11 +27,14 @@\n class AbstractSingleAnnotationSerializer(serializers.ModelSerializer):\n def validate(self, data):\n \"\"\"\n- Validate that the user that is creating this object equals the annotation_set.grader for retina_graders\n+ Validate that the user that is creating this object equals the\n+ annotation_set.grader for retina_graders\n \"\"\"\n- validate_grader_is_current_retina_user(\n- data[\"annotation_set\"].grader, self.context\n- )\n+ if data.get(\"annotation_set\") is None:\n+ return data\n+\n+ grader = data[\"annotation_set\"].grader\n+ validate_grader_is_current_retina_user(grader, self.context)\n return data\n \n class Meta:\n", "issue": "Partial update not working correctly because of custom validation\nThe validation rule in `app/grandchallenge/annotations/serializers.py:31` is breaking the partial update functionality.\r\nIf you try to do a partial update PATCH request to the endpoint, it will try to find the `annotation_set` attribute in the request data. If this is not present it will throw a KeyError. \r\n\r\nThis should be fixed by first checking if the key exists in the request data and only then running the validation check. The validation check is not needed if the key does not exist because it will then either not change (for partial update request) or throw a `field is required` validation error (for every other type of request).\r\n\r\nI will fix this and add a test for it.\n", "before_files": [{"content": "from rest_framework import serializers\n\nfrom .models import (\n ETDRSGridAnnotation,\n MeasurementAnnotation,\n BooleanClassificationAnnotation,\n PolygonAnnotationSet,\n SinglePolygonAnnotation,\n LandmarkAnnotationSet,\n SingleLandmarkAnnotation,\n)\nfrom .validators import validate_grader_is_current_retina_user\n\n\nclass AbstractAnnotationSerializer(serializers.ModelSerializer):\n def validate_grader(self, value):\n \"\"\"\n Validate that grader is the user creating the object for retina_graders group\n \"\"\"\n validate_grader_is_current_retina_user(value, self.context)\n return value\n\n class Meta:\n abstract = True\n\n\nclass AbstractSingleAnnotationSerializer(serializers.ModelSerializer):\n def validate(self, data):\n \"\"\"\n Validate that the user that is creating this object equals the annotation_set.grader for retina_graders\n \"\"\"\n validate_grader_is_current_retina_user(\n data[\"annotation_set\"].grader, self.context\n )\n return data\n\n class Meta:\n abstract = True\n\n\nclass ETDRSGridAnnotationSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = ETDRSGridAnnotation\n fields = (\"grader\", \"created\", \"image\", \"fovea\", \"optic_disk\")\n\n\nclass MeasurementAnnotationSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = MeasurementAnnotation\n fields = (\"image\", \"grader\", \"created\", \"start_voxel\", \"end_voxel\")\n\n\nclass BooleanClassificationAnnotationSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = BooleanClassificationAnnotation\n fields = (\"image\", \"grader\", \"created\", \"name\", \"value\")\n\n\nclass SinglePolygonAnnotationSerializer(AbstractSingleAnnotationSerializer):\n annotation_set = serializers.PrimaryKeyRelatedField(\n queryset=PolygonAnnotationSet.objects.all()\n )\n\n class Meta:\n model = SinglePolygonAnnotation\n fields = 
(\"id\", \"value\", \"annotation_set\")\n\n\nclass PolygonAnnotationSetSerializer(AbstractAnnotationSerializer):\n singlepolygonannotation_set = SinglePolygonAnnotationSerializer(\n many=True, read_only=True\n )\n\n class Meta:\n model = PolygonAnnotationSet\n fields = (\n \"id\",\n \"image\",\n \"grader\",\n \"created\",\n \"name\",\n \"singlepolygonannotation_set\",\n )\n\n\nclass LandmarkAnnotationSetSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = LandmarkAnnotationSet\n fields = (\"grader\", \"created\")\n\n\nclass SingleLandmarkAnnotationSerializer(AbstractSingleAnnotationSerializer):\n class Meta:\n model = SingleLandmarkAnnotation\n fields = (\"image\", \"annotation_set\", \"landmarks\")\n", "path": "app/grandchallenge/annotations/serializers.py"}], "after_files": [{"content": "from rest_framework import serializers\n\nfrom .models import (\n ETDRSGridAnnotation,\n MeasurementAnnotation,\n BooleanClassificationAnnotation,\n PolygonAnnotationSet,\n SinglePolygonAnnotation,\n LandmarkAnnotationSet,\n SingleLandmarkAnnotation,\n)\nfrom .validators import validate_grader_is_current_retina_user\n\n\nclass AbstractAnnotationSerializer(serializers.ModelSerializer):\n def validate_grader(self, value):\n \"\"\"\n Validate that grader is the user creating the object for retina_graders group\n \"\"\"\n validate_grader_is_current_retina_user(value, self.context)\n return value\n\n class Meta:\n abstract = True\n\n\nclass AbstractSingleAnnotationSerializer(serializers.ModelSerializer):\n def validate(self, data):\n \"\"\"\n Validate that the user that is creating this object equals the\n annotation_set.grader for retina_graders\n \"\"\"\n if data.get(\"annotation_set\") is None:\n return data\n\n grader = data[\"annotation_set\"].grader\n validate_grader_is_current_retina_user(grader, self.context)\n return data\n\n class Meta:\n abstract = True\n\n\nclass ETDRSGridAnnotationSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = ETDRSGridAnnotation\n fields = (\"grader\", \"created\", \"image\", \"fovea\", \"optic_disk\")\n\n\nclass MeasurementAnnotationSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = MeasurementAnnotation\n fields = (\"image\", \"grader\", \"created\", \"start_voxel\", \"end_voxel\")\n\n\nclass BooleanClassificationAnnotationSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = BooleanClassificationAnnotation\n fields = (\"image\", \"grader\", \"created\", \"name\", \"value\")\n\n\nclass SinglePolygonAnnotationSerializer(AbstractSingleAnnotationSerializer):\n annotation_set = serializers.PrimaryKeyRelatedField(\n queryset=PolygonAnnotationSet.objects.all()\n )\n\n class Meta:\n model = SinglePolygonAnnotation\n fields = (\"id\", \"value\", \"annotation_set\")\n\n\nclass PolygonAnnotationSetSerializer(AbstractAnnotationSerializer):\n singlepolygonannotation_set = SinglePolygonAnnotationSerializer(\n many=True, read_only=True\n )\n\n class Meta:\n model = PolygonAnnotationSet\n fields = (\n \"id\",\n \"image\",\n \"grader\",\n \"created\",\n \"name\",\n \"singlepolygonannotation_set\",\n )\n\n\nclass LandmarkAnnotationSetSerializer(AbstractAnnotationSerializer):\n class Meta:\n model = LandmarkAnnotationSet\n fields = (\"grader\", \"created\")\n\n\nclass SingleLandmarkAnnotationSerializer(AbstractSingleAnnotationSerializer):\n class Meta:\n model = SingleLandmarkAnnotation\n fields = (\"image\", \"annotation_set\", \"landmarks\")\n", "path": "app/grandchallenge/annotations/serializers.py"}]}
| 1,154 | 214 |
gh_patches_debug_23425
|
rasdani/github-patches
|
git_diff
|
ytdl-org__youtube-dl-13773
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[MLB] ERROR: Unable to extract video id
## Please follow the guide below
- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly
- Put an `x` into all the boxes [ ] relevant to your *issue* (like that [x])
- Use *Preview* tab to see how your issue will actually look like
---
### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.07.23*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.
- [X] I've **verified** and **I assure** that I'm running youtube-dl **2017.07.23**
### Before submitting an *issue* make sure you have:
- [X] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections
- [X] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones
### What is the purpose of your *issue*?
- [X] Bug report (encountered problems with youtube-dl)
- [ ] Site support request (request for adding support for a new site)
- [ ] Feature request (request for a new functionality)
- [ ] Question
- [ ] Other
---
### The following sections concretize particular purposed issues, you can erase any section (the contents between triple ---) not applicable to your *issue*
---
### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:
Add `-v` flag to **your command line** you run youtube-dl with, copy the **whole** output and insert it here. It should look similar to one below (replace it with **your** log inserted between triple ```):
```
youtube-dl.py -v "https://www.mlb.com/video/hargrove-homers-off-caldwell/c-1352023483?tid=67793694"
[debug] System config: []
[debug] User config: []
[debug] Custom config: []
[debug] Command-line args: ['-v', 'https://www.mlb.com/video/hargrove-homers-off-caldwell/c-1352023483?tid=67793694']
[debug] Encodings: locale cp1252, fs mbcs, out cp850, pref cp1252
[debug] youtube-dl version 2017.07.23
[debug] Python version 3.5.1 - Windows-7-6.1.7601-SP1
[debug] exe versions: ffmpeg N-71727-g46778ab, ffprobe 3.2, rtmpdump 2.4
[debug] Proxy map: {}
[MLB] c-1352023483?tid=67793694: Downloading webpage
ERROR: Unable to extract video id; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the --verbose flag and include its complete output.
Traceback (most recent call last):
File "C:\Transmogrifier\youtube-dl.py\youtube_dl\YoutubeDL.py", line 776, in extract_info
ie_result = ie.extract(url)
File "C:\Transmogrifier\youtube-dl.py\youtube_dl\extractor\common.py", line 433, in extract
ie_result = self._real_extract(url)
File "C:\Transmogrifier\youtube-dl.py\youtube_dl\extractor\mlb.py", line 132, in _real_extract
[r'data-video-?id="(\d+)"', r'content_id=(\d+)'], webpage, 'video id')
File "C:\Transmogrifier\youtube-dl.py\youtube_dl\extractor\common.py", line 782, in _search_regex
raise RegexNotFoundError('Unable to extract %s' % _name)
youtube_dl.utils.RegexNotFoundError: Unable to extract video id; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure
to call youtube-dl with the --verbose flag and include its complete output.
```
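A quick illustration of why the extractor's regexes come up empty (my own snippet — the HTML string is made up and only mimics what the new-style video pages appear to serve):

```python
import re

patterns = [r'data-video-?id="(\d+)"', r'content_id=(\d+)']
webpage = '<div class="mlb-video" data-content-id="1352023483"></div>'  # hypothetical markup
print([re.search(p, webpage) for p in patterns])  # [None, None] -> RegexNotFoundError
```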
---
### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**):
- Single video: https://www.mlb.com/video/hargrove-homers-off-caldwell/c-1352023483?tid=67793694
Thanks
Ringo
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `youtube_dl/extractor/mlb.py`
Content:
```
1 from __future__ import unicode_literals
2
3 import re
4
5 from .common import InfoExtractor
6 from ..utils import (
7 parse_duration,
8 parse_iso8601,
9 )
10
11
12 class MLBIE(InfoExtractor):
13 _VALID_URL = r'''(?x)
14 https?://
15 (?:[\da-z_-]+\.)*mlb\.com/
16 (?:
17 (?:
18 (?:.*?/)?video/(?:topic/[\da-z_-]+/)?v|
19 (?:
20 shared/video/embed/(?:embed|m-internal-embed)\.html|
21 (?:[^/]+/)+(?:play|index)\.jsp|
22 )\?.*?\bcontent_id=
23 )
24 (?P<id>n?\d+)|
25 (?:[^/]+/)*(?P<path>[^/]+)
26 )
27 '''
28 _TESTS = [
29 {
30 'url': 'http://m.mlb.com/sea/video/topic/51231442/v34698933/nymsea-ackley-robs-a-home-run-with-an-amazing-catch/?c_id=sea',
31 'md5': 'ff56a598c2cf411a9a38a69709e97079',
32 'info_dict': {
33 'id': '34698933',
34 'ext': 'mp4',
35 'title': "Ackley's spectacular catch",
36 'description': 'md5:7f5a981eb4f3cbc8daf2aeffa2215bf0',
37 'duration': 66,
38 'timestamp': 1405980600,
39 'upload_date': '20140721',
40 'thumbnail': r're:^https?://.*\.jpg$',
41 },
42 },
43 {
44 'url': 'http://m.mlb.com/video/topic/81536970/v34496663/mianym-stanton-practices-for-the-home-run-derby',
45 'md5': 'd9c022c10d21f849f49c05ae12a8a7e9',
46 'info_dict': {
47 'id': '34496663',
48 'ext': 'mp4',
49 'title': 'Stanton prepares for Derby',
50 'description': 'md5:d00ce1e5fd9c9069e9c13ab4faedfa57',
51 'duration': 46,
52 'timestamp': 1405105800,
53 'upload_date': '20140711',
54 'thumbnail': r're:^https?://.*\.jpg$',
55 },
56 },
57 {
58 'url': 'http://m.mlb.com/video/topic/vtp_hrd_sponsor/v34578115/hrd-cespedes-wins-2014-gillette-home-run-derby',
59 'md5': '0e6e73d509321e142409b695eadd541f',
60 'info_dict': {
61 'id': '34578115',
62 'ext': 'mp4',
63 'title': 'Cespedes repeats as Derby champ',
64 'description': 'md5:08df253ce265d4cf6fb09f581fafad07',
65 'duration': 488,
66 'timestamp': 1405399936,
67 'upload_date': '20140715',
68 'thumbnail': r're:^https?://.*\.jpg$',
69 },
70 },
71 {
72 'url': 'http://m.mlb.com/video/v34577915/bautista-on-derby-captaining-duties-his-performance',
73 'md5': 'b8fd237347b844365d74ea61d4245967',
74 'info_dict': {
75 'id': '34577915',
76 'ext': 'mp4',
77 'title': 'Bautista on Home Run Derby',
78 'description': 'md5:b80b34031143d0986dddc64a8839f0fb',
79 'duration': 52,
80 'timestamp': 1405390722,
81 'upload_date': '20140715',
82 'thumbnail': r're:^https?://.*\.jpg$',
83 },
84 },
85 {
86 'url': 'http://m.mlb.com/news/article/118550098/blue-jays-kevin-pillar-goes-spidey-up-the-wall-to-rob-tim-beckham-of-a-homer',
87 'md5': 'b190e70141fb9a1552a85426b4da1b5d',
88 'info_dict': {
89 'id': '75609783',
90 'ext': 'mp4',
91 'title': 'Must C: Pillar climbs for catch',
92 'description': '4/15/15: Blue Jays outfielder Kevin Pillar continues his defensive dominance by climbing the wall in left to rob Tim Beckham of a home run',
93 'timestamp': 1429124820,
94 'upload_date': '20150415',
95 }
96 },
97 {
98 'url': 'http://m.mlb.com/shared/video/embed/embed.html?content_id=35692085&topic_id=6479266&width=400&height=224&property=mlb',
99 'only_matching': True,
100 },
101 {
102 'url': 'http://mlb.mlb.com/shared/video/embed/embed.html?content_id=36599553',
103 'only_matching': True,
104 },
105 {
106 'url': 'http://mlb.mlb.com/es/video/play.jsp?content_id=36599553',
107 'only_matching': True,
108 },
109 {
110 'url': 'http://m.cardinals.mlb.com/stl/video/v51175783/atlstl-piscotty-makes-great-sliding-catch-on-line/?partnerId=as_mlb_20150321_42500876&adbid=579409712979910656&adbpl=tw&adbpr=52847728',
111 'only_matching': True,
112 },
113 {
114 # From http://m.mlb.com/news/article/118550098/blue-jays-kevin-pillar-goes-spidey-up-the-wall-to-rob-tim-beckham-of-a-homer
115 'url': 'http://mlb.mlb.com/shared/video/embed/m-internal-embed.html?content_id=75609783&property=mlb&autoplay=true&hashmode=false&siteSection=mlb/multimedia/article_118550098/article_embed&club=mlb',
116 'only_matching': True,
117 },
118 {
119 'url': 'http://washington.nationals.mlb.com/mlb/gameday/index.jsp?c_id=was&gid=2015_05_09_atlmlb_wasmlb_1&lang=en&content_id=108309983&mode=video#',
120 'only_matching': True,
121 }
122 ]
123
124 def _real_extract(self, url):
125 mobj = re.match(self._VALID_URL, url)
126 video_id = mobj.group('id')
127
128 if not video_id:
129 video_path = mobj.group('path')
130 webpage = self._download_webpage(url, video_path)
131 video_id = self._search_regex(
132 [r'data-video-?id="(\d+)"', r'content_id=(\d+)'], webpage, 'video id')
133
134 detail = self._download_xml(
135 'http://m.mlb.com/gen/multimedia/detail/%s/%s/%s/%s.xml'
136 % (video_id[-3], video_id[-2], video_id[-1], video_id), video_id)
137
138 title = detail.find('./headline').text
139 description = detail.find('./big-blurb').text
140 duration = parse_duration(detail.find('./duration').text)
141 timestamp = parse_iso8601(detail.attrib['date'][:-5])
142
143 thumbnails = [{
144 'url': thumbnail.text,
145 } for thumbnail in detail.findall('./thumbnailScenarios/thumbnailScenario')]
146
147 formats = []
148 for media_url in detail.findall('./url'):
149 playback_scenario = media_url.attrib['playback_scenario']
150 fmt = {
151 'url': media_url.text,
152 'format_id': playback_scenario,
153 }
154 m = re.search(r'(?P<vbr>\d+)K_(?P<width>\d+)X(?P<height>\d+)', playback_scenario)
155 if m:
156 fmt.update({
157 'vbr': int(m.group('vbr')) * 1000,
158 'width': int(m.group('width')),
159 'height': int(m.group('height')),
160 })
161 formats.append(fmt)
162
163 self._sort_formats(formats)
164
165 return {
166 'id': video_id,
167 'title': title,
168 'description': description,
169 'duration': duration,
170 'timestamp': timestamp,
171 'formats': formats,
172 'thumbnails': thumbnails,
173 }
174
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/youtube_dl/extractor/mlb.py b/youtube_dl/extractor/mlb.py
--- a/youtube_dl/extractor/mlb.py
+++ b/youtube_dl/extractor/mlb.py
@@ -15,7 +15,7 @@
(?:[\da-z_-]+\.)*mlb\.com/
(?:
(?:
- (?:.*?/)?video/(?:topic/[\da-z_-]+/)?v|
+ (?:.*?/)?video/(?:topic/[\da-z_-]+/)?(?:v|.*?/c-)|
(?:
shared/video/embed/(?:embed|m-internal-embed)\.html|
(?:[^/]+/)+(?:play|index)\.jsp|
@@ -94,6 +94,10 @@
'upload_date': '20150415',
}
},
+ {
+ 'url': 'https://www.mlb.com/video/hargrove-homers-off-caldwell/c-1352023483?tid=67793694',
+ 'only_matching': True,
+ },
{
'url': 'http://m.mlb.com/shared/video/embed/embed.html?content_id=35692085&topic_id=6479266&width=400&height=224&property=mlb',
'only_matching': True,
|
{"golden_diff": "diff --git a/youtube_dl/extractor/mlb.py b/youtube_dl/extractor/mlb.py\n--- a/youtube_dl/extractor/mlb.py\n+++ b/youtube_dl/extractor/mlb.py\n@@ -15,7 +15,7 @@\n (?:[\\da-z_-]+\\.)*mlb\\.com/\n (?:\n (?:\n- (?:.*?/)?video/(?:topic/[\\da-z_-]+/)?v|\n+ (?:.*?/)?video/(?:topic/[\\da-z_-]+/)?(?:v|.*?/c-)|\n (?:\n shared/video/embed/(?:embed|m-internal-embed)\\.html|\n (?:[^/]+/)+(?:play|index)\\.jsp|\n@@ -94,6 +94,10 @@\n 'upload_date': '20150415',\n }\n },\n+ {\n+ 'url': 'https://www.mlb.com/video/hargrove-homers-off-caldwell/c-1352023483?tid=67793694',\n+ 'only_matching': True,\n+ },\n {\n 'url': 'http://m.mlb.com/shared/video/embed/embed.html?content_id=35692085&topic_id=6479266&width=400&height=224&property=mlb',\n 'only_matching': True,\n", "issue": "[MLB] ERROR: Unable to extract video id\n## Please follow the guide below\r\n\r\n- You will be asked some questions and requested to provide some information, please read them **carefully** and answer honestly\r\n- Put an `x` into all the boxes [ ] relevant to your *issue* (like that [x])\r\n- Use *Preview* tab to see how your issue will actually look like\r\n\r\n---\r\n\r\n### Make sure you are using the *latest* version: run `youtube-dl --version` and ensure your version is *2017.07.23*. If it's not read [this FAQ entry](https://github.com/rg3/youtube-dl/blob/master/README.md#how-do-i-update-youtube-dl) and update. Issues with outdated version will be rejected.\r\n- [X] I've **verified** and **I assure** that I'm running youtube-dl **2017.07.23**\r\n\r\n### Before submitting an *issue* make sure you have:\r\n- [X] At least skimmed through [README](https://github.com/rg3/youtube-dl/blob/master/README.md) and **most notably** [FAQ](https://github.com/rg3/youtube-dl#faq) and [BUGS](https://github.com/rg3/youtube-dl#bugs) sections\r\n- [X] [Searched](https://github.com/rg3/youtube-dl/search?type=Issues) the bugtracker for similar issues including closed ones\r\n\r\n### What is the purpose of your *issue*?\r\n- [X] Bug report (encountered problems with youtube-dl)\r\n- [ ] Site support request (request for adding support for a new site)\r\n- [ ] Feature request (request for a new functionality)\r\n- [ ] Question\r\n- [ ] Other\r\n\r\n---\r\n\r\n### The following sections concretize particular purposed issues, you can erase any section (the contents between triple ---) not applicable to your *issue*\r\n\r\n---\r\n\r\n### If the purpose of this *issue* is a *bug report*, *site support request* or you are not completely sure provide the full verbose output as follows:\r\n\r\nAdd `-v` flag to **your command line** you run youtube-dl with, copy the **whole** output and insert it here. It should look similar to one below (replace it with **your** log inserted between triple ```):\r\n```\r\nyoutube-dl.py -v \"https://www.mlb.com/video/hargrove-homers-off-caldwell/c-1352023483?tid=67793694\"\r\n[debug] System config: []\r\n[debug] User config: []\r\n[debug] Custom config: []\r\n[debug] Command-line args: ['-v', 'https://www.mlb.com/video/hargrove-homers-off-caldwell/c-1352023483?tid=67793694']\r\n[debug] Encodings: locale cp1252, fs mbcs, out cp850, pref cp1252\r\n[debug] youtube-dl version 2017.07.23\r\n[debug] Python version 3.5.1 - Windows-7-6.1.7601-SP1\r\n[debug] exe versions: ffmpeg N-71727-g46778ab, ffprobe 3.2, rtmpdump 2.4\r\n[debug] Proxy map: {}\r\n[MLB] c-1352023483?tid=67793694: Downloading webpage\r\nERROR: Unable to extract video id; please report this issue on https://yt-dl.org/bug . 
Make sure you are using the latest version; type youtube-dl -U to update. Be sure to call youtube-dl with the -\r\n-verbose flag and include its complete output.\r\nTraceback (most recent call last):\r\n File \"C:\\Transmogrifier\\youtube-dl.py\\youtube_dl\\YoutubeDL.py\", line 776, in extract_info\r\n ie_result = ie.extract(url)\r\n File \"C:\\Transmogrifier\\youtube-dl.py\\youtube_dl\\extractor\\common.py\", line 433, in extract\r\n ie_result = self._real_extract(url)\r\n File \"C:\\Transmogrifier\\youtube-dl.py\\youtube_dl\\extractor\\mlb.py\", line 132, in _real_extract\r\n [r'data-video-?id=\"(\\d+)\"', r'content_id=(\\d+)'], webpage, 'video id')\r\n File \"C:\\Transmogrifier\\youtube-dl.py\\youtube_dl\\extractor\\common.py\", line 782, in _search_regex\r\n raise RegexNotFoundError('Unable to extract %s' % _name)\r\nyoutube_dl.utils.RegexNotFoundError: Unable to extract video id; please report this issue on https://yt-dl.org/bug . Make sure you are using the latest version; type youtube-dl -U to update. Be sure\r\n to call youtube-dl with the --verbose flag and include its complete output.\r\n\r\n```\r\n\r\n---\r\n\r\n### If the purpose of this *issue* is a *site support request* please provide all kinds of example URLs support for which should be included (replace following example URLs by **yours**):\r\n\r\n- Single video: https://www.mlb.com/video/hargrove-homers-off-caldwell/c-1352023483?tid=67793694\r\n\r\n\r\n\r\n\r\nThanks\r\nRingo\r\n\n", "before_files": [{"content": "from __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n parse_duration,\n parse_iso8601,\n)\n\n\nclass MLBIE(InfoExtractor):\n _VALID_URL = r'''(?x)\n https?://\n (?:[\\da-z_-]+\\.)*mlb\\.com/\n (?:\n (?:\n (?:.*?/)?video/(?:topic/[\\da-z_-]+/)?v|\n (?:\n shared/video/embed/(?:embed|m-internal-embed)\\.html|\n (?:[^/]+/)+(?:play|index)\\.jsp|\n )\\?.*?\\bcontent_id=\n )\n (?P<id>n?\\d+)|\n (?:[^/]+/)*(?P<path>[^/]+)\n )\n '''\n _TESTS = [\n {\n 'url': 'http://m.mlb.com/sea/video/topic/51231442/v34698933/nymsea-ackley-robs-a-home-run-with-an-amazing-catch/?c_id=sea',\n 'md5': 'ff56a598c2cf411a9a38a69709e97079',\n 'info_dict': {\n 'id': '34698933',\n 'ext': 'mp4',\n 'title': \"Ackley's spectacular catch\",\n 'description': 'md5:7f5a981eb4f3cbc8daf2aeffa2215bf0',\n 'duration': 66,\n 'timestamp': 1405980600,\n 'upload_date': '20140721',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n },\n },\n {\n 'url': 'http://m.mlb.com/video/topic/81536970/v34496663/mianym-stanton-practices-for-the-home-run-derby',\n 'md5': 'd9c022c10d21f849f49c05ae12a8a7e9',\n 'info_dict': {\n 'id': '34496663',\n 'ext': 'mp4',\n 'title': 'Stanton prepares for Derby',\n 'description': 'md5:d00ce1e5fd9c9069e9c13ab4faedfa57',\n 'duration': 46,\n 'timestamp': 1405105800,\n 'upload_date': '20140711',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n },\n },\n {\n 'url': 'http://m.mlb.com/video/topic/vtp_hrd_sponsor/v34578115/hrd-cespedes-wins-2014-gillette-home-run-derby',\n 'md5': '0e6e73d509321e142409b695eadd541f',\n 'info_dict': {\n 'id': '34578115',\n 'ext': 'mp4',\n 'title': 'Cespedes repeats as Derby champ',\n 'description': 'md5:08df253ce265d4cf6fb09f581fafad07',\n 'duration': 488,\n 'timestamp': 1405399936,\n 'upload_date': '20140715',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n },\n },\n {\n 'url': 'http://m.mlb.com/video/v34577915/bautista-on-derby-captaining-duties-his-performance',\n 'md5': 'b8fd237347b844365d74ea61d4245967',\n 'info_dict': {\n 'id': '34577915',\n 'ext': 'mp4',\n 'title': 
'Bautista on Home Run Derby',\n 'description': 'md5:b80b34031143d0986dddc64a8839f0fb',\n 'duration': 52,\n 'timestamp': 1405390722,\n 'upload_date': '20140715',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n },\n },\n {\n 'url': 'http://m.mlb.com/news/article/118550098/blue-jays-kevin-pillar-goes-spidey-up-the-wall-to-rob-tim-beckham-of-a-homer',\n 'md5': 'b190e70141fb9a1552a85426b4da1b5d',\n 'info_dict': {\n 'id': '75609783',\n 'ext': 'mp4',\n 'title': 'Must C: Pillar climbs for catch',\n 'description': '4/15/15: Blue Jays outfielder Kevin Pillar continues his defensive dominance by climbing the wall in left to rob Tim Beckham of a home run',\n 'timestamp': 1429124820,\n 'upload_date': '20150415',\n }\n },\n {\n 'url': 'http://m.mlb.com/shared/video/embed/embed.html?content_id=35692085&topic_id=6479266&width=400&height=224&property=mlb',\n 'only_matching': True,\n },\n {\n 'url': 'http://mlb.mlb.com/shared/video/embed/embed.html?content_id=36599553',\n 'only_matching': True,\n },\n {\n 'url': 'http://mlb.mlb.com/es/video/play.jsp?content_id=36599553',\n 'only_matching': True,\n },\n {\n 'url': 'http://m.cardinals.mlb.com/stl/video/v51175783/atlstl-piscotty-makes-great-sliding-catch-on-line/?partnerId=as_mlb_20150321_42500876&adbid=579409712979910656&adbpl=tw&adbpr=52847728',\n 'only_matching': True,\n },\n {\n # From http://m.mlb.com/news/article/118550098/blue-jays-kevin-pillar-goes-spidey-up-the-wall-to-rob-tim-beckham-of-a-homer\n 'url': 'http://mlb.mlb.com/shared/video/embed/m-internal-embed.html?content_id=75609783&property=mlb&autoplay=true&hashmode=false&siteSection=mlb/multimedia/article_118550098/article_embed&club=mlb',\n 'only_matching': True,\n },\n {\n 'url': 'http://washington.nationals.mlb.com/mlb/gameday/index.jsp?c_id=was&gid=2015_05_09_atlmlb_wasmlb_1&lang=en&content_id=108309983&mode=video#',\n 'only_matching': True,\n }\n ]\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n video_id = mobj.group('id')\n\n if not video_id:\n video_path = mobj.group('path')\n webpage = self._download_webpage(url, video_path)\n video_id = self._search_regex(\n [r'data-video-?id=\"(\\d+)\"', r'content_id=(\\d+)'], webpage, 'video id')\n\n detail = self._download_xml(\n 'http://m.mlb.com/gen/multimedia/detail/%s/%s/%s/%s.xml'\n % (video_id[-3], video_id[-2], video_id[-1], video_id), video_id)\n\n title = detail.find('./headline').text\n description = detail.find('./big-blurb').text\n duration = parse_duration(detail.find('./duration').text)\n timestamp = parse_iso8601(detail.attrib['date'][:-5])\n\n thumbnails = [{\n 'url': thumbnail.text,\n } for thumbnail in detail.findall('./thumbnailScenarios/thumbnailScenario')]\n\n formats = []\n for media_url in detail.findall('./url'):\n playback_scenario = media_url.attrib['playback_scenario']\n fmt = {\n 'url': media_url.text,\n 'format_id': playback_scenario,\n }\n m = re.search(r'(?P<vbr>\\d+)K_(?P<width>\\d+)X(?P<height>\\d+)', playback_scenario)\n if m:\n fmt.update({\n 'vbr': int(m.group('vbr')) * 1000,\n 'width': int(m.group('width')),\n 'height': int(m.group('height')),\n })\n formats.append(fmt)\n\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'duration': duration,\n 'timestamp': timestamp,\n 'formats': formats,\n 'thumbnails': thumbnails,\n }\n", "path": "youtube_dl/extractor/mlb.py"}], "after_files": [{"content": "from __future__ import unicode_literals\n\nimport re\n\nfrom .common import InfoExtractor\nfrom ..utils import (\n parse_duration,\n 
parse_iso8601,\n)\n\n\nclass MLBIE(InfoExtractor):\n _VALID_URL = r'''(?x)\n https?://\n (?:[\\da-z_-]+\\.)*mlb\\.com/\n (?:\n (?:\n (?:.*?/)?video/(?:topic/[\\da-z_-]+/)?(?:v|.*?/c-)|\n (?:\n shared/video/embed/(?:embed|m-internal-embed)\\.html|\n (?:[^/]+/)+(?:play|index)\\.jsp|\n )\\?.*?\\bcontent_id=\n )\n (?P<id>n?\\d+)|\n (?:[^/]+/)*(?P<path>[^/]+)\n )\n '''\n _TESTS = [\n {\n 'url': 'http://m.mlb.com/sea/video/topic/51231442/v34698933/nymsea-ackley-robs-a-home-run-with-an-amazing-catch/?c_id=sea',\n 'md5': 'ff56a598c2cf411a9a38a69709e97079',\n 'info_dict': {\n 'id': '34698933',\n 'ext': 'mp4',\n 'title': \"Ackley's spectacular catch\",\n 'description': 'md5:7f5a981eb4f3cbc8daf2aeffa2215bf0',\n 'duration': 66,\n 'timestamp': 1405980600,\n 'upload_date': '20140721',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n },\n },\n {\n 'url': 'http://m.mlb.com/video/topic/81536970/v34496663/mianym-stanton-practices-for-the-home-run-derby',\n 'md5': 'd9c022c10d21f849f49c05ae12a8a7e9',\n 'info_dict': {\n 'id': '34496663',\n 'ext': 'mp4',\n 'title': 'Stanton prepares for Derby',\n 'description': 'md5:d00ce1e5fd9c9069e9c13ab4faedfa57',\n 'duration': 46,\n 'timestamp': 1405105800,\n 'upload_date': '20140711',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n },\n },\n {\n 'url': 'http://m.mlb.com/video/topic/vtp_hrd_sponsor/v34578115/hrd-cespedes-wins-2014-gillette-home-run-derby',\n 'md5': '0e6e73d509321e142409b695eadd541f',\n 'info_dict': {\n 'id': '34578115',\n 'ext': 'mp4',\n 'title': 'Cespedes repeats as Derby champ',\n 'description': 'md5:08df253ce265d4cf6fb09f581fafad07',\n 'duration': 488,\n 'timestamp': 1405399936,\n 'upload_date': '20140715',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n },\n },\n {\n 'url': 'http://m.mlb.com/video/v34577915/bautista-on-derby-captaining-duties-his-performance',\n 'md5': 'b8fd237347b844365d74ea61d4245967',\n 'info_dict': {\n 'id': '34577915',\n 'ext': 'mp4',\n 'title': 'Bautista on Home Run Derby',\n 'description': 'md5:b80b34031143d0986dddc64a8839f0fb',\n 'duration': 52,\n 'timestamp': 1405390722,\n 'upload_date': '20140715',\n 'thumbnail': r're:^https?://.*\\.jpg$',\n },\n },\n {\n 'url': 'http://m.mlb.com/news/article/118550098/blue-jays-kevin-pillar-goes-spidey-up-the-wall-to-rob-tim-beckham-of-a-homer',\n 'md5': 'b190e70141fb9a1552a85426b4da1b5d',\n 'info_dict': {\n 'id': '75609783',\n 'ext': 'mp4',\n 'title': 'Must C: Pillar climbs for catch',\n 'description': '4/15/15: Blue Jays outfielder Kevin Pillar continues his defensive dominance by climbing the wall in left to rob Tim Beckham of a home run',\n 'timestamp': 1429124820,\n 'upload_date': '20150415',\n }\n },\n {\n 'url': 'https://www.mlb.com/video/hargrove-homers-off-caldwell/c-1352023483?tid=67793694',\n 'only_matching': True,\n },\n {\n 'url': 'http://m.mlb.com/shared/video/embed/embed.html?content_id=35692085&topic_id=6479266&width=400&height=224&property=mlb',\n 'only_matching': True,\n },\n {\n 'url': 'http://mlb.mlb.com/shared/video/embed/embed.html?content_id=36599553',\n 'only_matching': True,\n },\n {\n 'url': 'http://mlb.mlb.com/es/video/play.jsp?content_id=36599553',\n 'only_matching': True,\n },\n {\n 'url': 'http://m.cardinals.mlb.com/stl/video/v51175783/atlstl-piscotty-makes-great-sliding-catch-on-line/?partnerId=as_mlb_20150321_42500876&adbid=579409712979910656&adbpl=tw&adbpr=52847728',\n 'only_matching': True,\n },\n {\n # From http://m.mlb.com/news/article/118550098/blue-jays-kevin-pillar-goes-spidey-up-the-wall-to-rob-tim-beckham-of-a-homer\n 'url': 
'http://mlb.mlb.com/shared/video/embed/m-internal-embed.html?content_id=75609783&property=mlb&autoplay=true&hashmode=false&siteSection=mlb/multimedia/article_118550098/article_embed&club=mlb',\n 'only_matching': True,\n },\n {\n 'url': 'http://washington.nationals.mlb.com/mlb/gameday/index.jsp?c_id=was&gid=2015_05_09_atlmlb_wasmlb_1&lang=en&content_id=108309983&mode=video#',\n 'only_matching': True,\n }\n ]\n\n def _real_extract(self, url):\n mobj = re.match(self._VALID_URL, url)\n video_id = mobj.group('id')\n\n if not video_id:\n video_path = mobj.group('path')\n webpage = self._download_webpage(url, video_path)\n video_id = self._search_regex(\n [r'data-video-?id=\"(\\d+)\"', r'content_id=(\\d+)'], webpage, 'video id')\n\n detail = self._download_xml(\n 'http://m.mlb.com/gen/multimedia/detail/%s/%s/%s/%s.xml'\n % (video_id[-3], video_id[-2], video_id[-1], video_id), video_id)\n\n title = detail.find('./headline').text\n description = detail.find('./big-blurb').text\n duration = parse_duration(detail.find('./duration').text)\n timestamp = parse_iso8601(detail.attrib['date'][:-5])\n\n thumbnails = [{\n 'url': thumbnail.text,\n } for thumbnail in detail.findall('./thumbnailScenarios/thumbnailScenario')]\n\n formats = []\n for media_url in detail.findall('./url'):\n playback_scenario = media_url.attrib['playback_scenario']\n fmt = {\n 'url': media_url.text,\n 'format_id': playback_scenario,\n }\n m = re.search(r'(?P<vbr>\\d+)K_(?P<width>\\d+)X(?P<height>\\d+)', playback_scenario)\n if m:\n fmt.update({\n 'vbr': int(m.group('vbr')) * 1000,\n 'width': int(m.group('width')),\n 'height': int(m.group('height')),\n })\n formats.append(fmt)\n\n self._sort_formats(formats)\n\n return {\n 'id': video_id,\n 'title': title,\n 'description': description,\n 'duration': duration,\n 'timestamp': timestamp,\n 'formats': formats,\n 'thumbnails': thumbnails,\n }\n", "path": "youtube_dl/extractor/mlb.py"}]}
| 4,076 | 325 |
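The core of the golden diff in the record above is that newer MLB URLs carry the video id after a `/c-` segment rather than a `v` prefix, which is why `_VALID_URL` gains the `(?:v|.*?/c-)` alternative. The snippet below is a minimal stand-alone sketch of that matching behaviour; the trimmed-down pattern is an assumption made for illustration and is far narrower than the extractor's full `_VALID_URL`.

```python
import re

# Assumed, simplified stand-in for the id-matching tail of _VALID_URL after the fix.
PATTERN = re.compile(r'video/(?:topic/[\da-z_-]+/)?(?:v|.*?/c-)(?P<id>n?\d+)')

urls = [
    'http://m.mlb.com/video/topic/81536970/v34496663/mianym-stanton-practices-for-the-home-run-derby',
    'https://www.mlb.com/video/hargrove-homers-off-caldwell/c-1352023483?tid=67793694',
]
for url in urls:
    m = PATTERN.search(url)
    print(m.group('id') if m else 'no match')  # 34496663, then 1352023483
```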
gh_patches_debug_54915
|
rasdani/github-patches
|
git_diff
|
learningequality__kolibri-2286
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Detecting if a topic is a channel
## Summary
Currently, we detect if a topic is a channel by checking that the `channel_id` and `pk` are equal. I ran into a channel where they are not equal. See the 2nd channel in the image below. Does this just need to be republished? Also, should we just be annotating channels as kind=channel in the models?


--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/content/serializers.py`
Content:
```
1 from django.core.cache import cache
2 from django.db.models import Manager, Sum
3 from django.db.models.query import RawQuerySet
4 from kolibri.content.models import AssessmentMetaData, ChannelMetadata, ContentNode, File, Language
5 from le_utils.constants import content_kinds
6 from rest_framework import serializers
7
8
9 class ChannelMetadataSerializer(serializers.ModelSerializer):
10 root = serializers.PrimaryKeyRelatedField(read_only=True)
11
12 def to_representation(self, instance):
13 value = super(ChannelMetadataSerializer, self).to_representation(instance)
14
15 # if it has the file_size flag add extra file_size information
16 if 'request' in self.context and self.context['request'].GET.get('file_sizes', False):
17 descendants = instance.root.get_descendants()
18 total_resources = descendants.exclude(kind=content_kinds.TOPIC).count()
19 channel_summary = descendants.prefetch_related('files__local_file').aggregate(
20 total_file_size=Sum('files__local_file__file_size')
21 )
22 value.update({"total_resources": total_resources})
23 value.update(channel_summary)
24 return value
25
26 class Meta:
27 model = ChannelMetadata
28 fields = ('root', 'id', 'name', 'description', 'author', 'last_updated', 'version')
29
30
31 class LowerCaseField(serializers.CharField):
32
33 def to_representation(self, obj):
34 return super(LowerCaseField, self).to_representation(obj).lower()
35
36
37 class LanguageSerializer(serializers.ModelSerializer):
38 id = LowerCaseField(max_length=14)
39 lang_code = LowerCaseField(max_length=3)
40 lang_subcode = LowerCaseField(max_length=10)
41
42 class Meta:
43 model = Language
44 fields = ('id', 'lang_code', 'lang_subcode', 'lang_name', 'lang_direction')
45
46
47 class FileSerializer(serializers.ModelSerializer):
48 storage_url = serializers.SerializerMethodField()
49 preset = serializers.SerializerMethodField()
50 download_url = serializers.SerializerMethodField()
51 extension = serializers.SerializerMethodField()
52 file_size = serializers.SerializerMethodField()
53 lang = LanguageSerializer()
54
55 def get_storage_url(self, target_node):
56 return target_node.get_storage_url()
57
58 def get_preset(self, target_node):
59 return target_node.get_preset()
60
61 def get_download_url(self, target_node):
62 return target_node.get_download_url()
63
64 def get_extension(self, target_node):
65 return target_node.get_extension()
66
67 def get_file_size(self, target_node):
68 return target_node.get_file_size()
69
70 class Meta:
71 model = File
72 fields = ('storage_url', 'id', 'priority', 'available', 'file_size', 'extension', 'preset', 'lang',
73 'supplementary', 'thumbnail', 'download_url')
74
75
76 class AssessmentMetaDataSerializer(serializers.ModelSerializer):
77
78 assessment_item_ids = serializers.JSONField(default='[]')
79 mastery_model = serializers.JSONField(default='{}')
80
81 class Meta:
82 model = AssessmentMetaData
83 fields = ('assessment_item_ids', 'number_of_assessments', 'mastery_model', 'randomize', 'is_manipulable', )
84
85
86 def get_summary_logs(content_ids, user):
87 from kolibri.logger.models import ContentSummaryLog
88 if not content_ids:
89 return ContentSummaryLog.objects.none()
90 # get all summary logs for the current user that correspond to the descendant content nodes
91 return ContentSummaryLog.objects.filter(user=user, content_id__in=content_ids)
92
93
94 def get_topic_progress_fraction(topic, user):
95 leaf_ids = topic.get_descendants(include_self=False).order_by().exclude(
96 kind=content_kinds.TOPIC).values_list("content_id", flat=True)
97 return round(
98 (get_summary_logs(leaf_ids, user).aggregate(Sum('progress'))['progress__sum'] or 0) / (len(leaf_ids) or 1),
99 4
100 )
101
102
103 def get_content_progress_fraction(content, user):
104 from kolibri.logger.models import ContentSummaryLog
105 try:
106 # add up all the progress for the logs, and divide by the total number of content nodes to get overall progress
107 overall_progress = ContentSummaryLog.objects.get(user=user, content_id=content.content_id).progress
108 except ContentSummaryLog.DoesNotExist:
109 return None
110 return round(overall_progress, 4)
111
112
113 def get_topic_and_content_progress_fraction(node, user):
114 if node.kind == content_kinds.TOPIC:
115 return get_topic_progress_fraction(node, user)
116 else:
117 return get_content_progress_fraction(node, user)
118
119
120 def get_topic_and_content_progress_fractions(nodes, user):
121 leaf_ids = nodes.get_descendants(include_self=True).order_by().exclude(
122 kind=content_kinds.TOPIC).values_list("content_id", flat=True)
123
124 summary_logs = get_summary_logs(leaf_ids, user)
125
126 overall_progress = {log['content_id']: round(log['progress'], 4) for log in summary_logs.values('content_id', 'progress')}
127
128 for node in nodes:
129 if node.kind == content_kinds.TOPIC:
130 leaf_ids = node.get_descendants(include_self=True).order_by().exclude(
131 kind=content_kinds.TOPIC).values_list("content_id", flat=True)
132 overall_progress[node.content_id] = round(
133 sum(overall_progress.get(leaf_id, 0) for leaf_id in leaf_ids) / len(leaf_ids),
134 4
135 )
136
137 return overall_progress
138
139
140 def get_content_progress_fractions(nodes, user):
141 if isinstance(nodes, RawQuerySet) or isinstance(nodes, list):
142 leaf_ids = [datum.content_id for datum in nodes]
143 else:
144 leaf_ids = nodes.exclude(kind=content_kinds.TOPIC).values_list("content_id", flat=True)
145
146 summary_logs = get_summary_logs(leaf_ids, user)
147
148 # make a lookup dict for all logs to allow mapping from content_id to current progress
149 overall_progress = {log['content_id']: round(log['progress'], 4) for log in summary_logs.values('content_id', 'progress')}
150 return overall_progress
151
152
153 class ContentNodeListSerializer(serializers.ListSerializer):
154
155 def to_representation(self, data):
156
157 # Dealing with nested relationships, data can be a Manager,
158 # so, first get a queryset from the Manager if needed
159 data = data.all() if isinstance(data, Manager) else data
160
161 cache_key = None
162 # Cache parent look ups only
163 if "parent" in self.context['request'].GET:
164 cache_key = 'contentnode_list_{parent}'.format(
165 parent=self.context['request'].GET.get('parent'))
166
167 if cache.get(cache_key):
168 return cache.get(cache_key)
169
170 if not data:
171 return data
172
173 if 'request' not in self.context or not self.context['request'].user.is_facility_user:
174 progress_dict = {}
175 else:
176 user = self.context["request"].user
177 # Don't annotate topic progress as too expensive
178 progress_dict = get_content_progress_fractions(data, user)
179
180 result = []
181 topic_only = True
182
183 # Allow results to be limited after all queryset filtering has occurred
184 if self.limit:
185 data = data[:self.limit]
186
187 for item in data:
188 obj = self.child.to_representation(
189 item,
190 progress_fraction=progress_dict.get(item.content_id),
191 annotate_progress_fraction=False
192 )
193 topic_only = topic_only and obj.get('kind') == content_kinds.TOPIC
194 result.append(obj)
195
196 # Only store if all nodes are topics, because we don't annotate progress on them
197 # This has the happy side effect of not caching our dynamically calculated
198 # recommendation queries, which might change for the same user over time
199 # because they do not return topics
200 if topic_only and cache_key:
201 cache.set(cache_key, result, 60 * 10)
202
203 return result
204
205
206 class ContentNodeSerializer(serializers.ModelSerializer):
207 parent = serializers.PrimaryKeyRelatedField(read_only=True)
208 files = FileSerializer(many=True, read_only=True)
209 assessmentmetadata = AssessmentMetaDataSerializer(read_only=True, allow_null=True, many=True)
210 license = serializers.StringRelatedField(many=False)
211 license_description = serializers.SerializerMethodField()
212 lang = LanguageSerializer()
213
214 def __new__(cls, *args, **kwargs):
215 # This is overwritten to provide a ListClassSerializer for many=True
216 limit = kwargs.pop('limit', None)
217 new = super(ContentNodeSerializer, cls).__new__(cls, *args, **kwargs)
218 new.limit = limit
219 return new
220
221 def __init__(self, *args, **kwargs):
222 # Instantiate the superclass normally
223 super(ContentNodeSerializer, self).__init__(*args, **kwargs)
224
225 # enable dynamic fields specification!
226 if 'request' in self.context and self.context['request'].GET.get('fields', None):
227 fields = self.context['request'].GET['fields'].split(',')
228 # Drop any fields that are not specified in the `fields` argument.
229 allowed = set(fields)
230 existing = set(self.fields.keys())
231 for field_name in existing - allowed:
232 self.fields.pop(field_name)
233
234 def to_representation(self, instance, progress_fraction=None, annotate_progress_fraction=True):
235 if progress_fraction is None and annotate_progress_fraction:
236 if 'request' not in self.context or not self.context['request'].user.is_facility_user:
237 # Don't try to annotate for a non facility user
238 progress_fraction = 0.0
239 else:
240 user = self.context["request"].user
241 if instance.kind != content_kinds.TOPIC:
242 progress_fraction = get_content_progress_fraction(instance, user)
243 value = super(ContentNodeSerializer, self).to_representation(instance)
244 value['progress_fraction'] = progress_fraction
245 return value
246
247 def get_license_description(self, target_node):
248 if target_node.license_id:
249 return target_node.license.license_description
250 return ''
251
252 class Meta:
253 model = ContentNode
254 fields = (
255 'pk', 'content_id', 'title', 'description', 'kind', 'available', 'sort_order', 'license_owner',
256 'license', 'license_description', 'files', 'parent', 'author',
257 'assessmentmetadata', 'lang', 'channel_id',
258 )
259
260 list_serializer_class = ContentNodeListSerializer
261
262
263 class ContentNodeProgressListSerializer(serializers.ListSerializer):
264
265 def to_representation(self, data):
266
267 if not data:
268 return data
269
270 if 'request' not in self.context or not self.context['request'].user.is_facility_user:
271 progress_dict = {}
272 else:
273 user = self.context["request"].user
274 # Don't annotate topic progress as too expensive
275 progress_dict = get_topic_and_content_progress_fractions(data, user)
276
277 # Dealing with nested relationships, data can be a Manager,
278 # so, first get a queryset from the Manager if needed
279 iterable = data.all() if isinstance(data, Manager) else data
280
281 return [
282 self.child.to_representation(
283 item,
284 progress_fraction=progress_dict.get(item.content_id, 0.0),
285 annotate_progress_fraction=False
286 ) for item in iterable
287 ]
288
289
290 class ContentNodeProgressSerializer(serializers.Serializer):
291
292 def to_representation(self, instance, progress_fraction=None, annotate_progress_fraction=True):
293 if progress_fraction is None and annotate_progress_fraction:
294 if 'request' not in self.context or not self.context['request'].user.is_facility_user:
295 # Don't try to annotate for a non facility user
296 progress_fraction = 0
297 else:
298 user = self.context["request"].user
299 progress_fraction = get_topic_and_content_progress_fraction(instance, user) or 0.0
300 return {
301 'pk': instance.pk,
302 'progress_fraction': progress_fraction,
303 }
304
305 class Meta:
306 list_serializer_class = ContentNodeProgressListSerializer
307
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kolibri/content/serializers.py b/kolibri/content/serializers.py
--- a/kolibri/content/serializers.py
+++ b/kolibri/content/serializers.py
@@ -25,7 +25,7 @@
class Meta:
model = ChannelMetadata
- fields = ('root', 'id', 'name', 'description', 'author', 'last_updated', 'version')
+ fields = ('root', 'id', 'name', 'description', 'author', 'last_updated', 'version', 'thumbnail')
class LowerCaseField(serializers.CharField):
|
{"golden_diff": "diff --git a/kolibri/content/serializers.py b/kolibri/content/serializers.py\n--- a/kolibri/content/serializers.py\n+++ b/kolibri/content/serializers.py\n@@ -25,7 +25,7 @@\n \n class Meta:\n model = ChannelMetadata\n- fields = ('root', 'id', 'name', 'description', 'author', 'last_updated', 'version')\n+ fields = ('root', 'id', 'name', 'description', 'author', 'last_updated', 'version', 'thumbnail')\n \n \n class LowerCaseField(serializers.CharField):\n", "issue": "Detecting if a topic is a channel\n## Summary\r\n\r\nCurrenty, we detect if a topic is a channel by checking that the `channel_id` and `pk` are equal. I ran into a channel where they are not equal. See 2nd channel in image below. Does this just need to be republished? Also, should we be just annotating channels as kind=channel in the models?\r\n\r\n\r\n\r\n\r\n\r\n\n", "before_files": [{"content": "from django.core.cache import cache\nfrom django.db.models import Manager, Sum\nfrom django.db.models.query import RawQuerySet\nfrom kolibri.content.models import AssessmentMetaData, ChannelMetadata, ContentNode, File, Language\nfrom le_utils.constants import content_kinds\nfrom rest_framework import serializers\n\n\nclass ChannelMetadataSerializer(serializers.ModelSerializer):\n root = serializers.PrimaryKeyRelatedField(read_only=True)\n\n def to_representation(self, instance):\n value = super(ChannelMetadataSerializer, self).to_representation(instance)\n\n # if it has the file_size flag add extra file_size information\n if 'request' in self.context and self.context['request'].GET.get('file_sizes', False):\n descendants = instance.root.get_descendants()\n total_resources = descendants.exclude(kind=content_kinds.TOPIC).count()\n channel_summary = descendants.prefetch_related('files__local_file').aggregate(\n total_file_size=Sum('files__local_file__file_size')\n )\n value.update({\"total_resources\": total_resources})\n value.update(channel_summary)\n return value\n\n class Meta:\n model = ChannelMetadata\n fields = ('root', 'id', 'name', 'description', 'author', 'last_updated', 'version')\n\n\nclass LowerCaseField(serializers.CharField):\n\n def to_representation(self, obj):\n return super(LowerCaseField, self).to_representation(obj).lower()\n\n\nclass LanguageSerializer(serializers.ModelSerializer):\n id = LowerCaseField(max_length=14)\n lang_code = LowerCaseField(max_length=3)\n lang_subcode = LowerCaseField(max_length=10)\n\n class Meta:\n model = Language\n fields = ('id', 'lang_code', 'lang_subcode', 'lang_name', 'lang_direction')\n\n\nclass FileSerializer(serializers.ModelSerializer):\n storage_url = serializers.SerializerMethodField()\n preset = serializers.SerializerMethodField()\n download_url = serializers.SerializerMethodField()\n extension = serializers.SerializerMethodField()\n file_size = serializers.SerializerMethodField()\n lang = LanguageSerializer()\n\n def get_storage_url(self, target_node):\n return target_node.get_storage_url()\n\n def get_preset(self, target_node):\n return target_node.get_preset()\n\n def get_download_url(self, target_node):\n return target_node.get_download_url()\n\n def get_extension(self, target_node):\n return target_node.get_extension()\n\n def get_file_size(self, target_node):\n return target_node.get_file_size()\n\n class Meta:\n model = File\n fields = ('storage_url', 'id', 'priority', 'available', 'file_size', 'extension', 'preset', 'lang',\n 'supplementary', 'thumbnail', 'download_url')\n\n\nclass AssessmentMetaDataSerializer(serializers.ModelSerializer):\n\n 
assessment_item_ids = serializers.JSONField(default='[]')\n mastery_model = serializers.JSONField(default='{}')\n\n class Meta:\n model = AssessmentMetaData\n fields = ('assessment_item_ids', 'number_of_assessments', 'mastery_model', 'randomize', 'is_manipulable', )\n\n\ndef get_summary_logs(content_ids, user):\n from kolibri.logger.models import ContentSummaryLog\n if not content_ids:\n return ContentSummaryLog.objects.none()\n # get all summary logs for the current user that correspond to the descendant content nodes\n return ContentSummaryLog.objects.filter(user=user, content_id__in=content_ids)\n\n\ndef get_topic_progress_fraction(topic, user):\n leaf_ids = topic.get_descendants(include_self=False).order_by().exclude(\n kind=content_kinds.TOPIC).values_list(\"content_id\", flat=True)\n return round(\n (get_summary_logs(leaf_ids, user).aggregate(Sum('progress'))['progress__sum'] or 0) / (len(leaf_ids) or 1),\n 4\n )\n\n\ndef get_content_progress_fraction(content, user):\n from kolibri.logger.models import ContentSummaryLog\n try:\n # add up all the progress for the logs, and divide by the total number of content nodes to get overall progress\n overall_progress = ContentSummaryLog.objects.get(user=user, content_id=content.content_id).progress\n except ContentSummaryLog.DoesNotExist:\n return None\n return round(overall_progress, 4)\n\n\ndef get_topic_and_content_progress_fraction(node, user):\n if node.kind == content_kinds.TOPIC:\n return get_topic_progress_fraction(node, user)\n else:\n return get_content_progress_fraction(node, user)\n\n\ndef get_topic_and_content_progress_fractions(nodes, user):\n leaf_ids = nodes.get_descendants(include_self=True).order_by().exclude(\n kind=content_kinds.TOPIC).values_list(\"content_id\", flat=True)\n\n summary_logs = get_summary_logs(leaf_ids, user)\n\n overall_progress = {log['content_id']: round(log['progress'], 4) for log in summary_logs.values('content_id', 'progress')}\n\n for node in nodes:\n if node.kind == content_kinds.TOPIC:\n leaf_ids = node.get_descendants(include_self=True).order_by().exclude(\n kind=content_kinds.TOPIC).values_list(\"content_id\", flat=True)\n overall_progress[node.content_id] = round(\n sum(overall_progress.get(leaf_id, 0) for leaf_id in leaf_ids) / len(leaf_ids),\n 4\n )\n\n return overall_progress\n\n\ndef get_content_progress_fractions(nodes, user):\n if isinstance(nodes, RawQuerySet) or isinstance(nodes, list):\n leaf_ids = [datum.content_id for datum in nodes]\n else:\n leaf_ids = nodes.exclude(kind=content_kinds.TOPIC).values_list(\"content_id\", flat=True)\n\n summary_logs = get_summary_logs(leaf_ids, user)\n\n # make a lookup dict for all logs to allow mapping from content_id to current progress\n overall_progress = {log['content_id']: round(log['progress'], 4) for log in summary_logs.values('content_id', 'progress')}\n return overall_progress\n\n\nclass ContentNodeListSerializer(serializers.ListSerializer):\n\n def to_representation(self, data):\n\n # Dealing with nested relationships, data can be a Manager,\n # so, first get a queryset from the Manager if needed\n data = data.all() if isinstance(data, Manager) else data\n\n cache_key = None\n # Cache parent look ups only\n if \"parent\" in self.context['request'].GET:\n cache_key = 'contentnode_list_{parent}'.format(\n parent=self.context['request'].GET.get('parent'))\n\n if cache.get(cache_key):\n return cache.get(cache_key)\n\n if not data:\n return data\n\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n 
progress_dict = {}\n else:\n user = self.context[\"request\"].user\n # Don't annotate topic progress as too expensive\n progress_dict = get_content_progress_fractions(data, user)\n\n result = []\n topic_only = True\n\n # Allow results to be limited after all queryset filtering has occurred\n if self.limit:\n data = data[:self.limit]\n\n for item in data:\n obj = self.child.to_representation(\n item,\n progress_fraction=progress_dict.get(item.content_id),\n annotate_progress_fraction=False\n )\n topic_only = topic_only and obj.get('kind') == content_kinds.TOPIC\n result.append(obj)\n\n # Only store if all nodes are topics, because we don't annotate progress on them\n # This has the happy side effect of not caching our dynamically calculated\n # recommendation queries, which might change for the same user over time\n # because they do not return topics\n if topic_only and cache_key:\n cache.set(cache_key, result, 60 * 10)\n\n return result\n\n\nclass ContentNodeSerializer(serializers.ModelSerializer):\n parent = serializers.PrimaryKeyRelatedField(read_only=True)\n files = FileSerializer(many=True, read_only=True)\n assessmentmetadata = AssessmentMetaDataSerializer(read_only=True, allow_null=True, many=True)\n license = serializers.StringRelatedField(many=False)\n license_description = serializers.SerializerMethodField()\n lang = LanguageSerializer()\n\n def __new__(cls, *args, **kwargs):\n # This is overwritten to provide a ListClassSerializer for many=True\n limit = kwargs.pop('limit', None)\n new = super(ContentNodeSerializer, cls).__new__(cls, *args, **kwargs)\n new.limit = limit\n return new\n\n def __init__(self, *args, **kwargs):\n # Instantiate the superclass normally\n super(ContentNodeSerializer, self).__init__(*args, **kwargs)\n\n # enable dynamic fields specification!\n if 'request' in self.context and self.context['request'].GET.get('fields', None):\n fields = self.context['request'].GET['fields'].split(',')\n # Drop any fields that are not specified in the `fields` argument.\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n def to_representation(self, instance, progress_fraction=None, annotate_progress_fraction=True):\n if progress_fraction is None and annotate_progress_fraction:\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n # Don't try to annotate for a non facility user\n progress_fraction = 0.0\n else:\n user = self.context[\"request\"].user\n if instance.kind != content_kinds.TOPIC:\n progress_fraction = get_content_progress_fraction(instance, user)\n value = super(ContentNodeSerializer, self).to_representation(instance)\n value['progress_fraction'] = progress_fraction\n return value\n\n def get_license_description(self, target_node):\n if target_node.license_id:\n return target_node.license.license_description\n return ''\n\n class Meta:\n model = ContentNode\n fields = (\n 'pk', 'content_id', 'title', 'description', 'kind', 'available', 'sort_order', 'license_owner',\n 'license', 'license_description', 'files', 'parent', 'author',\n 'assessmentmetadata', 'lang', 'channel_id',\n )\n\n list_serializer_class = ContentNodeListSerializer\n\n\nclass ContentNodeProgressListSerializer(serializers.ListSerializer):\n\n def to_representation(self, data):\n\n if not data:\n return data\n\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n progress_dict = {}\n else:\n user = self.context[\"request\"].user\n # Don't 
annotate topic progress as too expensive\n progress_dict = get_topic_and_content_progress_fractions(data, user)\n\n # Dealing with nested relationships, data can be a Manager,\n # so, first get a queryset from the Manager if needed\n iterable = data.all() if isinstance(data, Manager) else data\n\n return [\n self.child.to_representation(\n item,\n progress_fraction=progress_dict.get(item.content_id, 0.0),\n annotate_progress_fraction=False\n ) for item in iterable\n ]\n\n\nclass ContentNodeProgressSerializer(serializers.Serializer):\n\n def to_representation(self, instance, progress_fraction=None, annotate_progress_fraction=True):\n if progress_fraction is None and annotate_progress_fraction:\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n # Don't try to annotate for a non facility user\n progress_fraction = 0\n else:\n user = self.context[\"request\"].user\n progress_fraction = get_topic_and_content_progress_fraction(instance, user) or 0.0\n return {\n 'pk': instance.pk,\n 'progress_fraction': progress_fraction,\n }\n\n class Meta:\n list_serializer_class = ContentNodeProgressListSerializer\n", "path": "kolibri/content/serializers.py"}], "after_files": [{"content": "from django.core.cache import cache\nfrom django.db.models import Manager, Sum\nfrom django.db.models.query import RawQuerySet\nfrom kolibri.content.models import AssessmentMetaData, ChannelMetadata, ContentNode, File, Language\nfrom le_utils.constants import content_kinds\nfrom rest_framework import serializers\n\n\nclass ChannelMetadataSerializer(serializers.ModelSerializer):\n root = serializers.PrimaryKeyRelatedField(read_only=True)\n\n def to_representation(self, instance):\n value = super(ChannelMetadataSerializer, self).to_representation(instance)\n\n # if it has the file_size flag add extra file_size information\n if 'request' in self.context and self.context['request'].GET.get('file_sizes', False):\n descendants = instance.root.get_descendants()\n total_resources = descendants.exclude(kind=content_kinds.TOPIC).count()\n channel_summary = descendants.prefetch_related('files__local_file').aggregate(\n total_file_size=Sum('files__local_file__file_size')\n )\n value.update({\"total_resources\": total_resources})\n value.update(channel_summary)\n return value\n\n class Meta:\n model = ChannelMetadata\n fields = ('root', 'id', 'name', 'description', 'author', 'last_updated', 'version', 'thumbnail')\n\n\nclass LowerCaseField(serializers.CharField):\n\n def to_representation(self, obj):\n return super(LowerCaseField, self).to_representation(obj).lower()\n\n\nclass LanguageSerializer(serializers.ModelSerializer):\n id = LowerCaseField(max_length=14)\n lang_code = LowerCaseField(max_length=3)\n lang_subcode = LowerCaseField(max_length=10)\n\n class Meta:\n model = Language\n fields = ('id', 'lang_code', 'lang_subcode', 'lang_name', 'lang_direction')\n\n\nclass FileSerializer(serializers.ModelSerializer):\n storage_url = serializers.SerializerMethodField()\n preset = serializers.SerializerMethodField()\n download_url = serializers.SerializerMethodField()\n extension = serializers.SerializerMethodField()\n file_size = serializers.SerializerMethodField()\n lang = LanguageSerializer()\n\n def get_storage_url(self, target_node):\n return target_node.get_storage_url()\n\n def get_preset(self, target_node):\n return target_node.get_preset()\n\n def get_download_url(self, target_node):\n return target_node.get_download_url()\n\n def get_extension(self, target_node):\n return 
target_node.get_extension()\n\n def get_file_size(self, target_node):\n return target_node.get_file_size()\n\n class Meta:\n model = File\n fields = ('storage_url', 'id', 'priority', 'available', 'file_size', 'extension', 'preset', 'lang',\n 'supplementary', 'thumbnail', 'download_url')\n\n\nclass AssessmentMetaDataSerializer(serializers.ModelSerializer):\n\n assessment_item_ids = serializers.JSONField(default='[]')\n mastery_model = serializers.JSONField(default='{}')\n\n class Meta:\n model = AssessmentMetaData\n fields = ('assessment_item_ids', 'number_of_assessments', 'mastery_model', 'randomize', 'is_manipulable', )\n\n\ndef get_summary_logs(content_ids, user):\n from kolibri.logger.models import ContentSummaryLog\n if not content_ids:\n return ContentSummaryLog.objects.none()\n # get all summary logs for the current user that correspond to the descendant content nodes\n return ContentSummaryLog.objects.filter(user=user, content_id__in=content_ids)\n\n\ndef get_topic_progress_fraction(topic, user):\n leaf_ids = topic.get_descendants(include_self=False).order_by().exclude(\n kind=content_kinds.TOPIC).values_list(\"content_id\", flat=True)\n return round(\n (get_summary_logs(leaf_ids, user).aggregate(Sum('progress'))['progress__sum'] or 0) / (len(leaf_ids) or 1),\n 4\n )\n\n\ndef get_content_progress_fraction(content, user):\n from kolibri.logger.models import ContentSummaryLog\n try:\n # add up all the progress for the logs, and divide by the total number of content nodes to get overall progress\n overall_progress = ContentSummaryLog.objects.get(user=user, content_id=content.content_id).progress\n except ContentSummaryLog.DoesNotExist:\n return None\n return round(overall_progress, 4)\n\n\ndef get_topic_and_content_progress_fraction(node, user):\n if node.kind == content_kinds.TOPIC:\n return get_topic_progress_fraction(node, user)\n else:\n return get_content_progress_fraction(node, user)\n\n\ndef get_topic_and_content_progress_fractions(nodes, user):\n leaf_ids = nodes.get_descendants(include_self=True).order_by().exclude(\n kind=content_kinds.TOPIC).values_list(\"content_id\", flat=True)\n\n summary_logs = get_summary_logs(leaf_ids, user)\n\n overall_progress = {log['content_id']: round(log['progress'], 4) for log in summary_logs.values('content_id', 'progress')}\n\n for node in nodes:\n if node.kind == content_kinds.TOPIC:\n leaf_ids = node.get_descendants(include_self=True).order_by().exclude(\n kind=content_kinds.TOPIC).values_list(\"content_id\", flat=True)\n overall_progress[node.content_id] = round(\n sum(overall_progress.get(leaf_id, 0) for leaf_id in leaf_ids) / len(leaf_ids),\n 4\n )\n\n return overall_progress\n\n\ndef get_content_progress_fractions(nodes, user):\n if isinstance(nodes, RawQuerySet) or isinstance(nodes, list):\n leaf_ids = [datum.content_id for datum in nodes]\n else:\n leaf_ids = nodes.exclude(kind=content_kinds.TOPIC).values_list(\"content_id\", flat=True)\n\n summary_logs = get_summary_logs(leaf_ids, user)\n\n # make a lookup dict for all logs to allow mapping from content_id to current progress\n overall_progress = {log['content_id']: round(log['progress'], 4) for log in summary_logs.values('content_id', 'progress')}\n return overall_progress\n\n\nclass ContentNodeListSerializer(serializers.ListSerializer):\n\n def to_representation(self, data):\n\n # Dealing with nested relationships, data can be a Manager,\n # so, first get a queryset from the Manager if needed\n data = data.all() if isinstance(data, Manager) else data\n\n cache_key = None\n # Cache 
parent look ups only\n if \"parent\" in self.context['request'].GET:\n cache_key = 'contentnode_list_{parent}'.format(\n parent=self.context['request'].GET.get('parent'))\n\n if cache.get(cache_key):\n return cache.get(cache_key)\n\n if not data:\n return data\n\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n progress_dict = {}\n else:\n user = self.context[\"request\"].user\n # Don't annotate topic progress as too expensive\n progress_dict = get_content_progress_fractions(data, user)\n\n result = []\n topic_only = True\n\n # Allow results to be limited after all queryset filtering has occurred\n if self.limit:\n data = data[:self.limit]\n\n for item in data:\n obj = self.child.to_representation(\n item,\n progress_fraction=progress_dict.get(item.content_id),\n annotate_progress_fraction=False\n )\n topic_only = topic_only and obj.get('kind') == content_kinds.TOPIC\n result.append(obj)\n\n # Only store if all nodes are topics, because we don't annotate progress on them\n # This has the happy side effect of not caching our dynamically calculated\n # recommendation queries, which might change for the same user over time\n # because they do not return topics\n if topic_only and cache_key:\n cache.set(cache_key, result, 60 * 10)\n\n return result\n\n\nclass ContentNodeSerializer(serializers.ModelSerializer):\n parent = serializers.PrimaryKeyRelatedField(read_only=True)\n files = FileSerializer(many=True, read_only=True)\n assessmentmetadata = AssessmentMetaDataSerializer(read_only=True, allow_null=True, many=True)\n license = serializers.StringRelatedField(many=False)\n license_description = serializers.SerializerMethodField()\n lang = LanguageSerializer()\n\n def __new__(cls, *args, **kwargs):\n # This is overwritten to provide a ListClassSerializer for many=True\n limit = kwargs.pop('limit', None)\n new = super(ContentNodeSerializer, cls).__new__(cls, *args, **kwargs)\n new.limit = limit\n return new\n\n def __init__(self, *args, **kwargs):\n # Instantiate the superclass normally\n super(ContentNodeSerializer, self).__init__(*args, **kwargs)\n\n # enable dynamic fields specification!\n if 'request' in self.context and self.context['request'].GET.get('fields', None):\n fields = self.context['request'].GET['fields'].split(',')\n # Drop any fields that are not specified in the `fields` argument.\n allowed = set(fields)\n existing = set(self.fields.keys())\n for field_name in existing - allowed:\n self.fields.pop(field_name)\n\n def to_representation(self, instance, progress_fraction=None, annotate_progress_fraction=True):\n if progress_fraction is None and annotate_progress_fraction:\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n # Don't try to annotate for a non facility user\n progress_fraction = 0.0\n else:\n user = self.context[\"request\"].user\n if instance.kind != content_kinds.TOPIC:\n progress_fraction = get_content_progress_fraction(instance, user)\n value = super(ContentNodeSerializer, self).to_representation(instance)\n value['progress_fraction'] = progress_fraction\n return value\n\n def get_license_description(self, target_node):\n if target_node.license_id:\n return target_node.license.license_description\n return ''\n\n class Meta:\n model = ContentNode\n fields = (\n 'pk', 'content_id', 'title', 'description', 'kind', 'available', 'sort_order', 'license_owner',\n 'license', 'license_description', 'files', 'parent', 'author',\n 'assessmentmetadata', 'lang', 'channel_id',\n )\n\n 
list_serializer_class = ContentNodeListSerializer\n\n\nclass ContentNodeProgressListSerializer(serializers.ListSerializer):\n\n def to_representation(self, data):\n\n if not data:\n return data\n\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n progress_dict = {}\n else:\n user = self.context[\"request\"].user\n # Don't annotate topic progress as too expensive\n progress_dict = get_topic_and_content_progress_fractions(data, user)\n\n # Dealing with nested relationships, data can be a Manager,\n # so, first get a queryset from the Manager if needed\n iterable = data.all() if isinstance(data, Manager) else data\n\n return [\n self.child.to_representation(\n item,\n progress_fraction=progress_dict.get(item.content_id, 0.0),\n annotate_progress_fraction=False\n ) for item in iterable\n ]\n\n\nclass ContentNodeProgressSerializer(serializers.Serializer):\n\n def to_representation(self, instance, progress_fraction=None, annotate_progress_fraction=True):\n if progress_fraction is None and annotate_progress_fraction:\n if 'request' not in self.context or not self.context['request'].user.is_facility_user:\n # Don't try to annotate for a non facility user\n progress_fraction = 0\n else:\n user = self.context[\"request\"].user\n progress_fraction = get_topic_and_content_progress_fraction(instance, user) or 0.0\n return {\n 'pk': instance.pk,\n 'progress_fraction': progress_fraction,\n }\n\n class Meta:\n list_serializer_class = ContentNodeProgressListSerializer\n", "path": "kolibri/content/serializers.py"}]}
| 3,824 | 130 |
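On the question raised in the Kolibri record above, comparing a node's `pk` to its `channel_id` is fragile once the two ids diverge; a sturdier client-side check is to compare the node's `pk` against the `root` id that `ChannelMetadataSerializer` already exposes. The sketch below is illustrative only: the helper names and the plain-dict nodes are assumptions, not Kolibri's actual API.

```python
# Illustrative only: fragile pk == channel_id check vs. matching against the
# channel's known root id (e.g. the `root` field serialized above).
def is_channel_fragile(node):
    return node['pk'] == node['channel_id']

def is_channel_robust(node, channel_roots):
    # channel_roots: mapping of channel_id -> root node pk.
    return channel_roots.get(node['channel_id']) == node['pk']

channel_roots = {'chan-1': 'root-1'}
node = {'pk': 'root-1', 'channel_id': 'chan-1', 'kind': 'topic'}
print(is_channel_fragile(node), is_channel_robust(node, channel_roots))  # False True
```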
gh_patches_debug_21085
|
rasdani/github-patches
|
git_diff
|
google__flax-541
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
PPO example does not terminate properly
### Configuration
Running the PPO example for a small number of frames in order to reproduce as fast as possible on a cloud VM with a V100 GPU. Config: python3.7, flax 0.2.2, jax 0.2.1, jaxlib 0.1.55.
Command run:
`python ppo_main.py --config.game=Qbert --config.total_frames=4000`
### Problem you have encountered:
Program does not exit. One can `print('Done')` after `ppo_lib.train` in `ppo_main`, but there is an open thread and the program can't exit (even after adding `raise SystemExit`).
### Extra comments
Added an extra line in `main`, `tf.config.experimental.set_visible_devices([], 'GPU')`, in order for the program to run properly with `tensorflow-gpu`; this is common in other `flax/examples`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/ppo/ppo_main.py`
Content:
```
1 # Copyright 2020 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 import os
16 from absl import flags
17 from absl import app
18 import jax
19 import jax.random
20 from ml_collections import config_flags
21
22 import ppo_lib
23 import models
24 import env_utils
25
26 FLAGS = flags.FLAGS
27
28 flags.DEFINE_string(
29 'logdir', default='/tmp/ppo_training',
30 help=('Directory to save checkpoints and logging info.'))
31
32 config_flags.DEFINE_config_file(
33 'config', os.path.join(os.path.dirname(__file__), 'default_config.py'),
34 'File path to the default configuration file.')
35
36 def main(argv):
37 config = FLAGS.config
38 game = config.game + 'NoFrameskip-v4'
39 num_actions = env_utils.get_num_actions(game)
40 print(f'Playing {game} with {num_actions} actions')
41 key = jax.random.PRNGKey(0)
42 key, subkey = jax.random.split(key)
43 model = models.create_model(subkey, num_outputs=num_actions)
44 optimizer = models.create_optimizer(model, learning_rate=config.learning_rate)
45 del model
46 optimizer = ppo_lib.train(optimizer, config, FLAGS.logdir)
47
48 if __name__ == '__main__':
49 app.run(main)
50
```
Path: `examples/ppo/agent.py`
Content:
```
1 # Copyright 2020 The Flax Authors.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Agent utilities, incl. choosing the move and running in separate process."""
16
17 import multiprocessing
18 import collections
19 import jax
20 import numpy as onp
21
22 import env_utils
23
24 @jax.jit
25 def policy_action(model, state):
26 """Forward pass of the network."""
27 out = model(state)
28 return out
29
30
31 ExpTuple = collections.namedtuple(
32 'ExpTuple', ['state', 'action', 'reward', 'value', 'log_prob', 'done'])
33
34
35 class RemoteSimulator:
36 """Wrap functionality for an agent emulating Atari in a separate process.
37
38 An object of this class is created for every agent.
39 """
40
41 def __init__(self, game: str):
42 """Start the remote process and create Pipe() to communicate with it."""
43 parent_conn, child_conn = multiprocessing.Pipe()
44 self.proc = multiprocessing.Process(
45 target=rcv_action_send_exp, args=(child_conn, game))
46 self.conn = parent_conn
47 self.proc.start()
48
49
50 def rcv_action_send_exp(conn, game: str):
51 """Run the remote agents.
52
53 Receive action from the main learner, perform one step of simulation and
54 send back collected experience.
55 """
56 env = env_utils.create_env(game, clip_rewards=True)
57 while True:
58 obs = env.reset()
59 done = False
60 # Observations fetched from Atari env need additional batch dimension.
61 state = obs[None, ...]
62 while not done:
63 conn.send(state)
64 action = conn.recv()
65 obs, reward, done, _ = env.step(action)
66 next_state = obs[None, ...] if not done else None
67 experience = (state, action, reward, done)
68 conn.send(experience)
69 if done:
70 break
71 state = next_state
72
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/ppo/agent.py b/examples/ppo/agent.py
--- a/examples/ppo/agent.py
+++ b/examples/ppo/agent.py
@@ -43,6 +43,7 @@
parent_conn, child_conn = multiprocessing.Pipe()
self.proc = multiprocessing.Process(
target=rcv_action_send_exp, args=(child_conn, game))
+ self.proc.daemon = True
self.conn = parent_conn
self.proc.start()
diff --git a/examples/ppo/ppo_main.py b/examples/ppo/ppo_main.py
--- a/examples/ppo/ppo_main.py
+++ b/examples/ppo/ppo_main.py
@@ -19,6 +19,8 @@
import jax.random
from ml_collections import config_flags
+import tensorflow as tf
+
import ppo_lib
import models
import env_utils
@@ -34,6 +36,9 @@
'File path to the default configuration file.')
def main(argv):
+ # Make sure tf does not allocate gpu memory.
+ tf.config.experimental.set_visible_devices([], 'GPU')
+
config = FLAGS.config
game = config.game + 'NoFrameskip-v4'
num_actions = env_utils.get_num_actions(game)
|
{"golden_diff": "diff --git a/examples/ppo/agent.py b/examples/ppo/agent.py\n--- a/examples/ppo/agent.py\n+++ b/examples/ppo/agent.py\n@@ -43,6 +43,7 @@\n parent_conn, child_conn = multiprocessing.Pipe()\n self.proc = multiprocessing.Process(\n target=rcv_action_send_exp, args=(child_conn, game))\n+ self.proc.daemon = True\n self.conn = parent_conn\n self.proc.start()\n \ndiff --git a/examples/ppo/ppo_main.py b/examples/ppo/ppo_main.py\n--- a/examples/ppo/ppo_main.py\n+++ b/examples/ppo/ppo_main.py\n@@ -19,6 +19,8 @@\n import jax.random\n from ml_collections import config_flags\n \n+import tensorflow as tf\n+\n import ppo_lib\n import models\n import env_utils\n@@ -34,6 +36,9 @@\n 'File path to the default configuration file.')\n \n def main(argv):\n+ # Make sure tf does not allocate gpu memory.\n+ tf.config.experimental.set_visible_devices([], 'GPU')\n+\n config = FLAGS.config\n game = config.game + 'NoFrameskip-v4'\n num_actions = env_utils.get_num_actions(game)\n", "issue": "PPO example does not terminate properly\n### Configuration\r\n\r\nRunning the PPO example for a short number of frames in order to reproduce as fast as possible on a cloud VM with a V100 GPU. Config python3.7, flax 0.2.2, jax 0.2.1, jaxlib 0.1.55 .\r\n\r\nCommand run:\r\n`python ppo_main.py --config.game=Qbert --config.total_frames=4000`\r\n\r\n### Problem you have encountered:\r\n\r\nProgram does not exit. One can `print('Done')` after `ppo_lib.train` in `ppo_main` but there is an open thread and program can't exit (even after adding `raise SystemExit`).\r\n\r\n### Extra comments\r\n\r\nAdded extra line in `main` ` tf.config.experimental.set_visible_devices([],'GPU')` in order for the program to run properly with `tensorflow-gpu`, this is common in other `flax/examples`. \n", "before_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom absl import flags\nfrom absl import app\nimport jax\nimport jax.random\nfrom ml_collections import config_flags\n\nimport ppo_lib\nimport models\nimport env_utils\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'logdir', default='/tmp/ppo_training',\n help=('Directory to save checkpoints and logging info.'))\n\nconfig_flags.DEFINE_config_file(\n 'config', os.path.join(os.path.dirname(__file__), 'default_config.py'),\n 'File path to the default configuration file.')\n\ndef main(argv):\n config = FLAGS.config\n game = config.game + 'NoFrameskip-v4'\n num_actions = env_utils.get_num_actions(game)\n print(f'Playing {game} with {num_actions} actions')\n key = jax.random.PRNGKey(0)\n key, subkey = jax.random.split(key)\n model = models.create_model(subkey, num_outputs=num_actions)\n optimizer = models.create_optimizer(model, learning_rate=config.learning_rate)\n del model\n optimizer = ppo_lib.train(optimizer, config, FLAGS.logdir)\n\nif __name__ == '__main__':\n app.run(main)\n", "path": "examples/ppo/ppo_main.py"}, {"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 
(the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Agent utilities, incl. choosing the move and running in separate process.\"\"\"\n\nimport multiprocessing\nimport collections\nimport jax\nimport numpy as onp\n\nimport env_utils\n\[email protected]\ndef policy_action(model, state):\n \"\"\"Forward pass of the network.\"\"\"\n out = model(state)\n return out\n\n\nExpTuple = collections.namedtuple(\n 'ExpTuple', ['state', 'action', 'reward', 'value', 'log_prob', 'done'])\n\n\nclass RemoteSimulator:\n \"\"\"Wrap functionality for an agent emulating Atari in a separate process.\n\n An object of this class is created for every agent.\n \"\"\"\n\n def __init__(self, game: str):\n \"\"\"Start the remote process and create Pipe() to communicate with it.\"\"\"\n parent_conn, child_conn = multiprocessing.Pipe()\n self.proc = multiprocessing.Process(\n target=rcv_action_send_exp, args=(child_conn, game))\n self.conn = parent_conn\n self.proc.start()\n\n\ndef rcv_action_send_exp(conn, game: str):\n \"\"\"Run the remote agents.\n\n Receive action from the main learner, perform one step of simulation and\n send back collected experience.\n \"\"\"\n env = env_utils.create_env(game, clip_rewards=True)\n while True:\n obs = env.reset()\n done = False\n # Observations fetched from Atari env need additional batch dimension.\n state = obs[None, ...]\n while not done:\n conn.send(state)\n action = conn.recv()\n obs, reward, done, _ = env.step(action)\n next_state = obs[None, ...] 
if not done else None\n experience = (state, action, reward, done)\n conn.send(experience)\n if done:\n break\n state = next_state\n", "path": "examples/ppo/agent.py"}], "after_files": [{"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nfrom absl import flags\nfrom absl import app\nimport jax\nimport jax.random\nfrom ml_collections import config_flags\n\nimport tensorflow as tf\n\nimport ppo_lib\nimport models\nimport env_utils\n\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'logdir', default='/tmp/ppo_training',\n help=('Directory to save checkpoints and logging info.'))\n\nconfig_flags.DEFINE_config_file(\n 'config', os.path.join(os.path.dirname(__file__), 'default_config.py'),\n 'File path to the default configuration file.')\n\ndef main(argv):\n # Make sure tf does not allocate gpu memory.\n tf.config.experimental.set_visible_devices([], 'GPU')\n\n config = FLAGS.config\n game = config.game + 'NoFrameskip-v4'\n num_actions = env_utils.get_num_actions(game)\n print(f'Playing {game} with {num_actions} actions')\n key = jax.random.PRNGKey(0)\n key, subkey = jax.random.split(key)\n model = models.create_model(subkey, num_outputs=num_actions)\n optimizer = models.create_optimizer(model, learning_rate=config.learning_rate)\n del model\n optimizer = ppo_lib.train(optimizer, config, FLAGS.logdir)\n\nif __name__ == '__main__':\n app.run(main)\n", "path": "examples/ppo/ppo_main.py"}, {"content": "# Copyright 2020 The Flax Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Agent utilities, incl. 
choosing the move and running in separate process.\"\"\"\n\nimport multiprocessing\nimport collections\nimport jax\nimport numpy as onp\n\nimport env_utils\n\[email protected]\ndef policy_action(model, state):\n \"\"\"Forward pass of the network.\"\"\"\n out = model(state)\n return out\n\n\nExpTuple = collections.namedtuple(\n 'ExpTuple', ['state', 'action', 'reward', 'value', 'log_prob', 'done'])\n\n\nclass RemoteSimulator:\n \"\"\"Wrap functionality for an agent emulating Atari in a separate process.\n\n An object of this class is created for every agent.\n \"\"\"\n\n def __init__(self, game: str):\n \"\"\"Start the remote process and create Pipe() to communicate with it.\"\"\"\n parent_conn, child_conn = multiprocessing.Pipe()\n self.proc = multiprocessing.Process(\n target=rcv_action_send_exp, args=(child_conn, game))\n self.proc.daemon = True\n self.conn = parent_conn\n self.proc.start()\n\n\ndef rcv_action_send_exp(conn, game: str):\n \"\"\"Run the remote agents.\n\n Receive action from the main learner, perform one step of simulation and\n send back collected experience.\n \"\"\"\n env = env_utils.create_env(game, clip_rewards=True)\n while True:\n obs = env.reset()\n done = False\n # Observations fetched from Atari env need additional batch dimension.\n state = obs[None, ...]\n while not done:\n conn.send(state)\n action = conn.recv()\n obs, reward, done, _ = env.step(action)\n next_state = obs[None, ...] if not done else None\n experience = (state, action, reward, done)\n conn.send(experience)\n if done:\n break\n state = next_state\n", "path": "examples/ppo/agent.py"}]}
| 1,605 | 269 |
gh_patches_debug_619
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-4706
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
#6460 Previous/Next Button Poll Request Results no background color
**URL:** https://meinberlin-dev.liqd.net/projekte/test-poll-merge-running-poll-with-user-content/
**user:** any
**expected behaviour:** Previous/Next button on the poll request results has a pink background.
**behaviour:** Button has no background. Only the outlines turn pink when the button is clicked.
**important screensize:**
**device & browser:**
**Comment/Question:**
Screenshot?
dev:
<img width="286" alt="Bildschirmfoto 2022-11-09 um 05 38 05" src="https://user-images.githubusercontent.com/113356258/200740386-60d26bc2-f169-40e4-9730-79d6d8724dad.png">
<img width="220" alt="Bildschirmfoto 2022-11-09 um 05 40 30" src="https://user-images.githubusercontent.com/113356258/200740411-e40f6bf6-83ba-468f-a941-93bbfe045993.png">
stage:
<img width="189" alt="Bildschirmfoto 2022-11-09 um 05 44 21" src="https://user-images.githubusercontent.com/113356258/200740726-f116d498-cb19-4074-bd57-541f7d5d8d2a.png">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/ideas/views.py`
Content:
```
1 from django.contrib import messages
2 from django.db import transaction
3 from django.urls import reverse
4 from django.utils.translation import gettext_lazy as _
5 from django.views import generic
6
7 from adhocracy4.categories import filters as category_filters
8 from adhocracy4.exports.views import DashboardExportView
9 from adhocracy4.filters import filters as a4_filters
10 from adhocracy4.filters import views as filter_views
11 from adhocracy4.filters import widgets as filters_widgets
12 from adhocracy4.filters.filters import FreeTextFilter
13 from adhocracy4.labels import filters as label_filters
14 from adhocracy4.projects.mixins import DisplayProjectOrModuleMixin
15 from adhocracy4.projects.mixins import ProjectMixin
16 from adhocracy4.rules import mixins as rules_mixins
17 from meinberlin.apps.contrib import forms as contrib_forms
18 from meinberlin.apps.contrib.views import CanonicalURLDetailView
19 from meinberlin.apps.moderatorfeedback.forms import ModeratorStatementForm
20 from meinberlin.apps.moderatorfeedback.models import ModeratorStatement
21 from meinberlin.apps.notifications.emails import \
22 NotifyContactOnModeratorFeedback
23 from meinberlin.apps.notifications.emails import \
24 NotifyCreatorOnModeratorFeedback
25
26 from . import forms
27 from . import models
28
29
30 class FreeTextFilterWidget(filters_widgets.FreeTextFilterWidget):
31 label = _('Search')
32
33
34 def get_ordering_choices(view):
35 choices = (('-created', _('Most recent')),)
36 if view.module.has_feature('rate', models.Idea):
37 choices += ('-positive_rating_count', _('Most popular')),
38 choices += ('-comment_count', _('Most commented')),
39 return choices
40
41
42 class IdeaFilterSet(a4_filters.DefaultsFilterSet):
43 defaults = {
44 'ordering': '-created'
45 }
46 category = category_filters.CategoryFilter()
47 labels = label_filters.LabelFilter()
48 ordering = a4_filters.DynamicChoicesOrderingFilter(
49 choices=get_ordering_choices
50 )
51 search = FreeTextFilter(
52 widget=FreeTextFilterWidget,
53 fields=['name']
54 )
55
56 class Meta:
57 model = models.Idea
58 fields = ['search', 'labels', 'category']
59
60
61 class AbstractIdeaListView(ProjectMixin,
62 filter_views.FilteredListView):
63 paginate_by = 15
64
65
66 class IdeaListView(AbstractIdeaListView,
67 DisplayProjectOrModuleMixin
68 ):
69 model = models.Idea
70 filter_set = IdeaFilterSet
71
72 def get_queryset(self):
73 return super().get_queryset()\
74 .filter(module=self.module)
75
76
77 class AbstractIdeaDetailView(ProjectMixin,
78 rules_mixins.PermissionRequiredMixin,
79 CanonicalURLDetailView):
80 get_context_from_object = True
81
82
83 class IdeaDetailView(AbstractIdeaDetailView):
84 model = models.Idea
85 queryset = models.Idea.objects.annotate_positive_rating_count()\
86 .annotate_negative_rating_count()
87 permission_required = 'meinberlin_ideas.view_idea'
88
89
90 class AbstractIdeaCreateView(ProjectMixin,
91 rules_mixins.PermissionRequiredMixin,
92 generic.CreateView):
93 """Create an idea in the context of a module."""
94
95 def get_permission_object(self, *args, **kwargs):
96 return self.module
97
98 def form_valid(self, form):
99 form.instance.creator = self.request.user
100 form.instance.module = self.module
101 return super().form_valid(form)
102
103 def get_form_kwargs(self):
104 kwargs = super().get_form_kwargs()
105 kwargs['module'] = self.module
106 if self.module.settings_instance:
107 kwargs['settings_instance'] = self.module.settings_instance
108 return kwargs
109
110
111 class IdeaCreateView(AbstractIdeaCreateView):
112 model = models.Idea
113 form_class = forms.IdeaForm
114 permission_required = 'meinberlin_ideas.add_idea'
115 template_name = 'meinberlin_ideas/idea_create_form.html'
116
117
118 class AbstractIdeaUpdateView(ProjectMixin,
119 rules_mixins.PermissionRequiredMixin,
120 generic.UpdateView):
121 get_context_from_object = True
122
123 def get_form_kwargs(self):
124 kwargs = super().get_form_kwargs()
125 instance = kwargs.get('instance')
126 kwargs['module'] = instance.module
127 if instance.module.settings_instance:
128 kwargs['settings_instance'] = \
129 instance.module.settings_instance
130 return kwargs
131
132
133 class IdeaUpdateView(AbstractIdeaUpdateView):
134 model = models.Idea
135 form_class = forms.IdeaForm
136 permission_required = 'meinberlin_ideas.change_idea'
137 template_name = 'meinberlin_ideas/idea_update_form.html'
138
139
140 class AbstractIdeaDeleteView(ProjectMixin,
141 rules_mixins.PermissionRequiredMixin,
142 generic.DeleteView):
143 get_context_from_object = True
144
145 def get_success_url(self):
146 return reverse(
147 'project-detail', kwargs={'slug': self.project.slug})
148
149 def delete(self, request, *args, **kwargs):
150 messages.success(self.request, self.success_message)
151 return super(AbstractIdeaDeleteView, self)\
152 .delete(request, *args, **kwargs)
153
154
155 class IdeaDeleteView(AbstractIdeaDeleteView):
156 model = models.Idea
157 success_message = _('Your Idea has been deleted')
158 permission_required = 'meinberlin_ideas.change_idea'
159 template_name = 'meinberlin_ideas/idea_confirm_delete.html'
160
161
162 class AbstractIdeaModerateView(
163 ProjectMixin,
164 rules_mixins.PermissionRequiredMixin,
165 generic.detail.SingleObjectMixin,
166 generic.detail.SingleObjectTemplateResponseMixin,
167 contrib_forms.BaseMultiModelFormView):
168
169 get_context_from_object = True
170
171 def __init__(self):
172 self.forms = {
173 'moderateable': {
174 'model': self.model,
175 'form_class': self.moderateable_form_class
176 },
177 'statement': {
178 'model': ModeratorStatement,
179 'form_class': ModeratorStatementForm
180 }
181 }
182
183 def dispatch(self, *args, **kwargs):
184 self.object = self.get_object()
185 return super().dispatch(*args, **kwargs)
186
187 def get_success_url(self):
188 return self.object.get_absolute_url()
189
190 def forms_save(self, forms, commit=True):
191 objects = super().forms_save(forms, commit=False)
192 moderateable = objects['moderateable']
193 statement = objects['statement']
194
195 if not statement.pk:
196 statement.creator = self.request.user
197
198 with transaction.atomic():
199 statement.save()
200 moderateable.moderator_statement = statement
201 moderateable.save()
202 if hasattr(self.object, 'contact_email'):
203 NotifyContactOnModeratorFeedback.send(self.object)
204 else:
205 NotifyCreatorOnModeratorFeedback.send(self.object)
206 return objects
207
208 def get_instance(self, name):
209 if name == 'moderateable':
210 return self.object
211 elif name == 'statement':
212 return self.object.moderator_statement
213
214
215 class IdeaModerateView(AbstractIdeaModerateView):
216 model = models.Idea
217 permission_required = 'meinberlin_ideas.moderate_idea'
218 template_name = 'meinberlin_ideas/idea_moderate_form.html'
219 moderateable_form_class = forms.IdeaModerateForm
220
221
222 class IdeaDashboardExportView(DashboardExportView):
223 template_name = 'a4exports/export_dashboard.html'
224
225 def get_context_data(self, **kwargs):
226 context = super().get_context_data(**kwargs)
227 context['export'] = reverse(
228 'a4dashboard:idea-export',
229 kwargs={'module_slug': self.module.slug})
230 context['comment_export'] = reverse(
231 'a4dashboard:idea-comment-export',
232 kwargs={'module_slug': self.module.slug})
233 return context
234
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/meinberlin/apps/ideas/views.py b/meinberlin/apps/ideas/views.py
--- a/meinberlin/apps/ideas/views.py
+++ b/meinberlin/apps/ideas/views.py
@@ -55,7 +55,7 @@
class Meta:
model = models.Idea
- fields = ['search', 'labels', 'category']
+ fields = ['search', 'category', 'labels']
class AbstractIdeaListView(ProjectMixin,
|
{"golden_diff": "diff --git a/meinberlin/apps/ideas/views.py b/meinberlin/apps/ideas/views.py\n--- a/meinberlin/apps/ideas/views.py\n+++ b/meinberlin/apps/ideas/views.py\n@@ -55,7 +55,7 @@\n \n class Meta:\n model = models.Idea\n- fields = ['search', 'labels', 'category']\n+ fields = ['search', 'category', 'labels']\n \n \n class AbstractIdeaListView(ProjectMixin,\n", "issue": "#6460 Previous/Next Button Poll Request Results no backround color\n**URL:** https://meinberlin-dev.liqd.net/projekte/test-poll-merge-running-poll-with-user-content/\r\n**user:** any\r\n**expected behaviour:** Previous/Next button on the poll request results has a pink background.\r\n**behaviour:** Button has no background. Only the outlines turn pink when the button is clicked\r\n**important screensize:**\r\n**device & browser:** \r\n**Comment/Question:** \r\n\r\nScreenshot?\r\n\r\ndev:\r\n<img width=\"286\" alt=\"Bildschirmfoto 2022-11-09 um 05 38 05\" src=\"https://user-images.githubusercontent.com/113356258/200740386-60d26bc2-f169-40e4-9730-79d6d8724dad.png\">\r\n<img width=\"220\" alt=\"Bildschirmfoto 2022-11-09 um 05 40 30\" src=\"https://user-images.githubusercontent.com/113356258/200740411-e40f6bf6-83ba-468f-a941-93bbfe045993.png\">\r\n\r\nstage:\r\n\r\n<img width=\"189\" alt=\"Bildschirmfoto 2022-11-09 um 05 44 21\" src=\"https://user-images.githubusercontent.com/113356258/200740726-f116d498-cb19-4074-bd57-541f7d5d8d2a.png\">\r\n\n", "before_files": [{"content": "from django.contrib import messages\nfrom django.db import transaction\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.categories import filters as category_filters\nfrom adhocracy4.exports.views import DashboardExportView\nfrom adhocracy4.filters import filters as a4_filters\nfrom adhocracy4.filters import views as filter_views\nfrom adhocracy4.filters import widgets as filters_widgets\nfrom adhocracy4.filters.filters import FreeTextFilter\nfrom adhocracy4.labels import filters as label_filters\nfrom adhocracy4.projects.mixins import DisplayProjectOrModuleMixin\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom meinberlin.apps.contrib import forms as contrib_forms\nfrom meinberlin.apps.contrib.views import CanonicalURLDetailView\nfrom meinberlin.apps.moderatorfeedback.forms import ModeratorStatementForm\nfrom meinberlin.apps.moderatorfeedback.models import ModeratorStatement\nfrom meinberlin.apps.notifications.emails import \\\n NotifyContactOnModeratorFeedback\nfrom meinberlin.apps.notifications.emails import \\\n NotifyCreatorOnModeratorFeedback\n\nfrom . import forms\nfrom . 
import models\n\n\nclass FreeTextFilterWidget(filters_widgets.FreeTextFilterWidget):\n label = _('Search')\n\n\ndef get_ordering_choices(view):\n choices = (('-created', _('Most recent')),)\n if view.module.has_feature('rate', models.Idea):\n choices += ('-positive_rating_count', _('Most popular')),\n choices += ('-comment_count', _('Most commented')),\n return choices\n\n\nclass IdeaFilterSet(a4_filters.DefaultsFilterSet):\n defaults = {\n 'ordering': '-created'\n }\n category = category_filters.CategoryFilter()\n labels = label_filters.LabelFilter()\n ordering = a4_filters.DynamicChoicesOrderingFilter(\n choices=get_ordering_choices\n )\n search = FreeTextFilter(\n widget=FreeTextFilterWidget,\n fields=['name']\n )\n\n class Meta:\n model = models.Idea\n fields = ['search', 'labels', 'category']\n\n\nclass AbstractIdeaListView(ProjectMixin,\n filter_views.FilteredListView):\n paginate_by = 15\n\n\nclass IdeaListView(AbstractIdeaListView,\n DisplayProjectOrModuleMixin\n ):\n model = models.Idea\n filter_set = IdeaFilterSet\n\n def get_queryset(self):\n return super().get_queryset()\\\n .filter(module=self.module)\n\n\nclass AbstractIdeaDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n CanonicalURLDetailView):\n get_context_from_object = True\n\n\nclass IdeaDetailView(AbstractIdeaDetailView):\n model = models.Idea\n queryset = models.Idea.objects.annotate_positive_rating_count()\\\n .annotate_negative_rating_count()\n permission_required = 'meinberlin_ideas.view_idea'\n\n\nclass AbstractIdeaCreateView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.CreateView):\n \"\"\"Create an idea in the context of a module.\"\"\"\n\n def get_permission_object(self, *args, **kwargs):\n return self.module\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n form.instance.module = self.module\n return super().form_valid(form)\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['module'] = self.module\n if self.module.settings_instance:\n kwargs['settings_instance'] = self.module.settings_instance\n return kwargs\n\n\nclass IdeaCreateView(AbstractIdeaCreateView):\n model = models.Idea\n form_class = forms.IdeaForm\n permission_required = 'meinberlin_ideas.add_idea'\n template_name = 'meinberlin_ideas/idea_create_form.html'\n\n\nclass AbstractIdeaUpdateView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.UpdateView):\n get_context_from_object = True\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n instance = kwargs.get('instance')\n kwargs['module'] = instance.module\n if instance.module.settings_instance:\n kwargs['settings_instance'] = \\\n instance.module.settings_instance\n return kwargs\n\n\nclass IdeaUpdateView(AbstractIdeaUpdateView):\n model = models.Idea\n form_class = forms.IdeaForm\n permission_required = 'meinberlin_ideas.change_idea'\n template_name = 'meinberlin_ideas/idea_update_form.html'\n\n\nclass AbstractIdeaDeleteView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DeleteView):\n get_context_from_object = True\n\n def get_success_url(self):\n return reverse(\n 'project-detail', kwargs={'slug': self.project.slug})\n\n def delete(self, request, *args, **kwargs):\n messages.success(self.request, self.success_message)\n return super(AbstractIdeaDeleteView, self)\\\n .delete(request, *args, **kwargs)\n\n\nclass IdeaDeleteView(AbstractIdeaDeleteView):\n model = models.Idea\n success_message = _('Your Idea has been deleted')\n permission_required = 
'meinberlin_ideas.change_idea'\n template_name = 'meinberlin_ideas/idea_confirm_delete.html'\n\n\nclass AbstractIdeaModerateView(\n ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.detail.SingleObjectMixin,\n generic.detail.SingleObjectTemplateResponseMixin,\n contrib_forms.BaseMultiModelFormView):\n\n get_context_from_object = True\n\n def __init__(self):\n self.forms = {\n 'moderateable': {\n 'model': self.model,\n 'form_class': self.moderateable_form_class\n },\n 'statement': {\n 'model': ModeratorStatement,\n 'form_class': ModeratorStatementForm\n }\n }\n\n def dispatch(self, *args, **kwargs):\n self.object = self.get_object()\n return super().dispatch(*args, **kwargs)\n\n def get_success_url(self):\n return self.object.get_absolute_url()\n\n def forms_save(self, forms, commit=True):\n objects = super().forms_save(forms, commit=False)\n moderateable = objects['moderateable']\n statement = objects['statement']\n\n if not statement.pk:\n statement.creator = self.request.user\n\n with transaction.atomic():\n statement.save()\n moderateable.moderator_statement = statement\n moderateable.save()\n if hasattr(self.object, 'contact_email'):\n NotifyContactOnModeratorFeedback.send(self.object)\n else:\n NotifyCreatorOnModeratorFeedback.send(self.object)\n return objects\n\n def get_instance(self, name):\n if name == 'moderateable':\n return self.object\n elif name == 'statement':\n return self.object.moderator_statement\n\n\nclass IdeaModerateView(AbstractIdeaModerateView):\n model = models.Idea\n permission_required = 'meinberlin_ideas.moderate_idea'\n template_name = 'meinberlin_ideas/idea_moderate_form.html'\n moderateable_form_class = forms.IdeaModerateForm\n\n\nclass IdeaDashboardExportView(DashboardExportView):\n template_name = 'a4exports/export_dashboard.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['export'] = reverse(\n 'a4dashboard:idea-export',\n kwargs={'module_slug': self.module.slug})\n context['comment_export'] = reverse(\n 'a4dashboard:idea-comment-export',\n kwargs={'module_slug': self.module.slug})\n return context\n", "path": "meinberlin/apps/ideas/views.py"}], "after_files": [{"content": "from django.contrib import messages\nfrom django.db import transaction\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django.views import generic\n\nfrom adhocracy4.categories import filters as category_filters\nfrom adhocracy4.exports.views import DashboardExportView\nfrom adhocracy4.filters import filters as a4_filters\nfrom adhocracy4.filters import views as filter_views\nfrom adhocracy4.filters import widgets as filters_widgets\nfrom adhocracy4.filters.filters import FreeTextFilter\nfrom adhocracy4.labels import filters as label_filters\nfrom adhocracy4.projects.mixins import DisplayProjectOrModuleMixin\nfrom adhocracy4.projects.mixins import ProjectMixin\nfrom adhocracy4.rules import mixins as rules_mixins\nfrom meinberlin.apps.contrib import forms as contrib_forms\nfrom meinberlin.apps.contrib.views import CanonicalURLDetailView\nfrom meinberlin.apps.moderatorfeedback.forms import ModeratorStatementForm\nfrom meinberlin.apps.moderatorfeedback.models import ModeratorStatement\nfrom meinberlin.apps.notifications.emails import \\\n NotifyContactOnModeratorFeedback\nfrom meinberlin.apps.notifications.emails import \\\n NotifyCreatorOnModeratorFeedback\n\nfrom . import forms\nfrom . 
import models\n\n\nclass FreeTextFilterWidget(filters_widgets.FreeTextFilterWidget):\n label = _('Search')\n\n\ndef get_ordering_choices(view):\n choices = (('-created', _('Most recent')),)\n if view.module.has_feature('rate', models.Idea):\n choices += ('-positive_rating_count', _('Most popular')),\n choices += ('-comment_count', _('Most commented')),\n return choices\n\n\nclass IdeaFilterSet(a4_filters.DefaultsFilterSet):\n defaults = {\n 'ordering': '-created'\n }\n category = category_filters.CategoryFilter()\n labels = label_filters.LabelFilter()\n ordering = a4_filters.DynamicChoicesOrderingFilter(\n choices=get_ordering_choices\n )\n search = FreeTextFilter(\n widget=FreeTextFilterWidget,\n fields=['name']\n )\n\n class Meta:\n model = models.Idea\n fields = ['search', 'category', 'labels']\n\n\nclass AbstractIdeaListView(ProjectMixin,\n filter_views.FilteredListView):\n paginate_by = 15\n\n\nclass IdeaListView(AbstractIdeaListView,\n DisplayProjectOrModuleMixin\n ):\n model = models.Idea\n filter_set = IdeaFilterSet\n\n def get_queryset(self):\n return super().get_queryset()\\\n .filter(module=self.module)\n\n\nclass AbstractIdeaDetailView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n CanonicalURLDetailView):\n get_context_from_object = True\n\n\nclass IdeaDetailView(AbstractIdeaDetailView):\n model = models.Idea\n queryset = models.Idea.objects.annotate_positive_rating_count()\\\n .annotate_negative_rating_count()\n permission_required = 'meinberlin_ideas.view_idea'\n\n\nclass AbstractIdeaCreateView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.CreateView):\n \"\"\"Create an idea in the context of a module.\"\"\"\n\n def get_permission_object(self, *args, **kwargs):\n return self.module\n\n def form_valid(self, form):\n form.instance.creator = self.request.user\n form.instance.module = self.module\n return super().form_valid(form)\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['module'] = self.module\n if self.module.settings_instance:\n kwargs['settings_instance'] = self.module.settings_instance\n return kwargs\n\n\nclass IdeaCreateView(AbstractIdeaCreateView):\n model = models.Idea\n form_class = forms.IdeaForm\n permission_required = 'meinberlin_ideas.add_idea'\n template_name = 'meinberlin_ideas/idea_create_form.html'\n\n\nclass AbstractIdeaUpdateView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.UpdateView):\n get_context_from_object = True\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n instance = kwargs.get('instance')\n kwargs['module'] = instance.module\n if instance.module.settings_instance:\n kwargs['settings_instance'] = \\\n instance.module.settings_instance\n return kwargs\n\n\nclass IdeaUpdateView(AbstractIdeaUpdateView):\n model = models.Idea\n form_class = forms.IdeaForm\n permission_required = 'meinberlin_ideas.change_idea'\n template_name = 'meinberlin_ideas/idea_update_form.html'\n\n\nclass AbstractIdeaDeleteView(ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.DeleteView):\n get_context_from_object = True\n\n def get_success_url(self):\n return reverse(\n 'project-detail', kwargs={'slug': self.project.slug})\n\n def delete(self, request, *args, **kwargs):\n messages.success(self.request, self.success_message)\n return super(AbstractIdeaDeleteView, self)\\\n .delete(request, *args, **kwargs)\n\n\nclass IdeaDeleteView(AbstractIdeaDeleteView):\n model = models.Idea\n success_message = _('Your Idea has been deleted')\n permission_required = 
'meinberlin_ideas.change_idea'\n template_name = 'meinberlin_ideas/idea_confirm_delete.html'\n\n\nclass AbstractIdeaModerateView(\n ProjectMixin,\n rules_mixins.PermissionRequiredMixin,\n generic.detail.SingleObjectMixin,\n generic.detail.SingleObjectTemplateResponseMixin,\n contrib_forms.BaseMultiModelFormView):\n\n get_context_from_object = True\n\n def __init__(self):\n self.forms = {\n 'moderateable': {\n 'model': self.model,\n 'form_class': self.moderateable_form_class\n },\n 'statement': {\n 'model': ModeratorStatement,\n 'form_class': ModeratorStatementForm\n }\n }\n\n def dispatch(self, *args, **kwargs):\n self.object = self.get_object()\n return super().dispatch(*args, **kwargs)\n\n def get_success_url(self):\n return self.object.get_absolute_url()\n\n def forms_save(self, forms, commit=True):\n objects = super().forms_save(forms, commit=False)\n moderateable = objects['moderateable']\n statement = objects['statement']\n\n if not statement.pk:\n statement.creator = self.request.user\n\n with transaction.atomic():\n statement.save()\n moderateable.moderator_statement = statement\n moderateable.save()\n if hasattr(self.object, 'contact_email'):\n NotifyContactOnModeratorFeedback.send(self.object)\n else:\n NotifyCreatorOnModeratorFeedback.send(self.object)\n return objects\n\n def get_instance(self, name):\n if name == 'moderateable':\n return self.object\n elif name == 'statement':\n return self.object.moderator_statement\n\n\nclass IdeaModerateView(AbstractIdeaModerateView):\n model = models.Idea\n permission_required = 'meinberlin_ideas.moderate_idea'\n template_name = 'meinberlin_ideas/idea_moderate_form.html'\n moderateable_form_class = forms.IdeaModerateForm\n\n\nclass IdeaDashboardExportView(DashboardExportView):\n template_name = 'a4exports/export_dashboard.html'\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context['export'] = reverse(\n 'a4dashboard:idea-export',\n kwargs={'module_slug': self.module.slug})\n context['comment_export'] = reverse(\n 'a4dashboard:idea-comment-export',\n kwargs={'module_slug': self.module.slug})\n return context\n", "path": "meinberlin/apps/ideas/views.py"}]}
| 2,898 | 107 |
gh_patches_debug_7204
|
rasdani/github-patches
|
git_diff
|
modin-project__modin-1072
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Documentation for 0.7 says that it depends on pandas==0.23.4, but when I import modin it says it requires pandas==0.25
In the [modin documentation for 0.7](https://modin.readthedocs.io/en/latest/installation.html#dependencies), it says that it depends on `pandas==0.23.4`, but when I install `modin==0.7` and try to import it, the following import error is thrown:
ImportError: The pandas version installed does not match the required pandas version in Modin. Please install pandas 0.25.3 to use Modin.
Is this an error in the documentation? Is there any way I can use `modin==0.7` with `pandas==0.23.4`? I am using Dataiku DSS v6.0, which requires `pandas==0.23.4` and cannot be upgraded.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `modin/pandas/__init__.py`
Content:
```
1 import pandas
2
3 __pandas_version__ = "0.25.3"
4
5 if pandas.__version__ != __pandas_version__:
6 raise ImportError(
7 "The pandas version installed does not match the required pandas "
8 "version in Modin. Please install pandas {} to use "
9 "Modin.".format(__pandas_version__)
10 )
11
12 from pandas import (
13 eval,
14 unique,
15 value_counts,
16 cut,
17 to_numeric,
18 factorize,
19 test,
20 qcut,
21 date_range,
22 period_range,
23 Index,
24 MultiIndex,
25 CategoricalIndex,
26 bdate_range,
27 DatetimeIndex,
28 Timedelta,
29 Timestamp,
30 to_timedelta,
31 set_eng_float_format,
32 options,
33 set_option,
34 NaT,
35 PeriodIndex,
36 Categorical,
37 Interval,
38 UInt8Dtype,
39 UInt16Dtype,
40 UInt32Dtype,
41 UInt64Dtype,
42 SparseDtype,
43 Int8Dtype,
44 Int16Dtype,
45 Int32Dtype,
46 Int64Dtype,
47 CategoricalDtype,
48 DatetimeTZDtype,
49 IntervalDtype,
50 PeriodDtype,
51 RangeIndex,
52 Int64Index,
53 UInt64Index,
54 Float64Index,
55 TimedeltaIndex,
56 IntervalIndex,
57 IndexSlice,
58 Grouper,
59 array,
60 Period,
61 show_versions,
62 DateOffset,
63 timedelta_range,
64 infer_freq,
65 interval_range,
66 ExcelWriter,
67 SparseArray,
68 SparseSeries,
69 SparseDataFrame,
70 datetime,
71 NamedAgg,
72 )
73 import threading
74 import os
75 import types
76 import sys
77
78 from .. import __version__
79 from .concat import concat
80 from .dataframe import DataFrame
81 from .datetimes import to_datetime
82 from .io import (
83 read_csv,
84 read_parquet,
85 read_json,
86 read_html,
87 read_clipboard,
88 read_excel,
89 read_hdf,
90 read_feather,
91 read_msgpack,
92 read_stata,
93 read_sas,
94 read_pickle,
95 read_sql,
96 read_gbq,
97 read_table,
98 read_fwf,
99 read_sql_table,
100 read_sql_query,
101 read_spss,
102 ExcelFile,
103 to_pickle,
104 HDFStore,
105 )
106 from .reshape import get_dummies, melt, crosstab, lreshape, wide_to_long
107 from .series import Series
108 from .general import (
109 isna,
110 isnull,
111 merge,
112 merge_asof,
113 merge_ordered,
114 pivot_table,
115 notnull,
116 notna,
117 pivot,
118 )
119 from .plotting import Plotting as plotting
120 from .. import __execution_engine__ as execution_engine
121
122 # Set this so that Pandas doesn't try to multithread by itself
123 os.environ["OMP_NUM_THREADS"] = "1"
124 num_cpus = 1
125
126
127 def initialize_ray():
128 import ray
129
130 """Initializes ray based on environment variables and internal defaults."""
131 if threading.current_thread().name == "MainThread":
132 plasma_directory = None
133 cluster = os.environ.get("MODIN_RAY_CLUSTER", None)
134 redis_address = os.environ.get("MODIN_REDIS_ADDRESS", None)
135 if cluster == "True" and redis_address is not None:
136 # We only start ray in a cluster setting for the head node.
137 ray.init(
138 include_webui=False,
139 ignore_reinit_error=True,
140 redis_address=redis_address,
141 logging_level=100,
142 )
143 elif cluster is None:
144 object_store_memory = os.environ.get("MODIN_MEMORY", None)
145 if os.environ.get("MODIN_OUT_OF_CORE", "False").title() == "True":
146 from tempfile import gettempdir
147
148 plasma_directory = gettempdir()
149 # We may have already set the memory from the environment variable, we don't
150 # want to overwrite that value if we have.
151 if object_store_memory is None:
152 # Round down to the nearest Gigabyte.
153 mem_bytes = ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
154 # Default to 8x memory for out of core
155 object_store_memory = 8 * mem_bytes
156 # In case anything failed above, we can still improve the memory for Modin.
157 if object_store_memory is None:
158 # Round down to the nearest Gigabyte.
159 object_store_memory = int(
160 0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9
161 )
162 # If the memory pool is smaller than 2GB, just use the default in ray.
163 if object_store_memory == 0:
164 object_store_memory = None
165 else:
166 object_store_memory = int(object_store_memory)
167 ray.init(
168 include_webui=False,
169 ignore_reinit_error=True,
170 plasma_directory=plasma_directory,
171 object_store_memory=object_store_memory,
172 redis_address=redis_address,
173 logging_level=100,
174 memory=object_store_memory,
175 )
176 # Register custom serializer for method objects to avoid warning message.
177 # We serialize `MethodType` objects when we use AxisPartition operations.
178 ray.register_custom_serializer(types.MethodType, use_pickle=True)
179
180 # Register a fix import function to run on all_workers including the driver.
181 # This is a hack solution to fix #647, #746
182 def move_stdlib_ahead_of_site_packages(*args):
183 site_packages_path = None
184 site_packages_path_index = -1
185 for i, path in enumerate(sys.path):
186 if sys.exec_prefix in path and path.endswith("site-packages"):
187 site_packages_path = path
188 site_packages_path_index = i
189 # break on first found
190 break
191
192 if site_packages_path is not None:
193 # stdlib packages layout as follows:
194 # - python3.x
195 # - typing.py
196 # - site-packages/
197 # - pandas
198 # So extracting the dirname of the site_packages can point us
199 # to the directory containing standard libraries.
200 sys.path.insert(
201 site_packages_path_index, os.path.dirname(site_packages_path)
202 )
203
204 move_stdlib_ahead_of_site_packages()
205 ray.worker.global_worker.run_function_on_all_workers(
206 move_stdlib_ahead_of_site_packages
207 )
208
209
210 if execution_engine == "Ray":
211 import ray
212
213 initialize_ray()
214 num_cpus = ray.cluster_resources()["CPU"]
215 elif execution_engine == "Dask": # pragma: no cover
216 from distributed.client import _get_global_client
217 import warnings
218
219 warnings.warn("The Dask Engine for Modin is experimental.")
220
221 if threading.current_thread().name == "MainThread":
222 # initialize the dask client
223 client = _get_global_client()
224 if client is None:
225 from distributed import Client
226 import multiprocessing
227
228 num_cpus = multiprocessing.cpu_count()
229 client = Client(n_workers=num_cpus)
230 elif execution_engine != "Python":
231 raise ImportError("Unrecognized execution engine: {}.".format(execution_engine))
232
233 DEFAULT_NPARTITIONS = max(4, int(num_cpus))
234
235 __all__ = [
236 "DataFrame",
237 "Series",
238 "read_csv",
239 "read_parquet",
240 "read_json",
241 "read_html",
242 "read_clipboard",
243 "read_excel",
244 "read_hdf",
245 "read_feather",
246 "read_msgpack",
247 "read_stata",
248 "read_sas",
249 "read_pickle",
250 "read_sql",
251 "read_gbq",
252 "read_table",
253 "read_spss",
254 "concat",
255 "eval",
256 "unique",
257 "value_counts",
258 "cut",
259 "to_numeric",
260 "factorize",
261 "test",
262 "qcut",
263 "to_datetime",
264 "get_dummies",
265 "isna",
266 "isnull",
267 "merge",
268 "pivot_table",
269 "date_range",
270 "Index",
271 "MultiIndex",
272 "Series",
273 "bdate_range",
274 "period_range",
275 "DatetimeIndex",
276 "to_timedelta",
277 "set_eng_float_format",
278 "options",
279 "set_option",
280 "CategoricalIndex",
281 "Timedelta",
282 "Timestamp",
283 "NaT",
284 "PeriodIndex",
285 "Categorical",
286 "__version__",
287 "melt",
288 "crosstab",
289 "plotting",
290 "Interval",
291 "UInt8Dtype",
292 "UInt16Dtype",
293 "UInt32Dtype",
294 "UInt64Dtype",
295 "SparseDtype",
296 "Int8Dtype",
297 "Int16Dtype",
298 "Int32Dtype",
299 "Int64Dtype",
300 "CategoricalDtype",
301 "DatetimeTZDtype",
302 "IntervalDtype",
303 "PeriodDtype",
304 "RangeIndex",
305 "Int64Index",
306 "UInt64Index",
307 "Float64Index",
308 "TimedeltaIndex",
309 "IntervalIndex",
310 "IndexSlice",
311 "Grouper",
312 "array",
313 "Period",
314 "show_versions",
315 "DateOffset",
316 "timedelta_range",
317 "infer_freq",
318 "interval_range",
319 "ExcelWriter",
320 "read_fwf",
321 "read_sql_table",
322 "read_sql_query",
323 "ExcelFile",
324 "to_pickle",
325 "HDFStore",
326 "lreshape",
327 "wide_to_long",
328 "merge_asof",
329 "merge_ordered",
330 "notnull",
331 "notna",
332 "pivot",
333 "SparseArray",
334 "SparseSeries",
335 "SparseDataFrame",
336 "datetime",
337 "NamedAgg",
338 "DEFAULT_NPARTITIONS",
339 ]
340
341 del pandas
342
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/modin/pandas/__init__.py b/modin/pandas/__init__.py
--- a/modin/pandas/__init__.py
+++ b/modin/pandas/__init__.py
@@ -3,10 +3,11 @@
__pandas_version__ = "0.25.3"
if pandas.__version__ != __pandas_version__:
- raise ImportError(
- "The pandas version installed does not match the required pandas "
- "version in Modin. Please install pandas {} to use "
- "Modin.".format(__pandas_version__)
+ import warnings
+
+ warnings.warn(
+ "The pandas version installed does not match the required pandas version in "
+ "Modin. This may cause undesired side effects!".format(__pandas_version__)
)
from pandas import (
|
{"golden_diff": "diff --git a/modin/pandas/__init__.py b/modin/pandas/__init__.py\n--- a/modin/pandas/__init__.py\n+++ b/modin/pandas/__init__.py\n@@ -3,10 +3,11 @@\n __pandas_version__ = \"0.25.3\"\n \n if pandas.__version__ != __pandas_version__:\n- raise ImportError(\n- \"The pandas version installed does not match the required pandas \"\n- \"version in Modin. Please install pandas {} to use \"\n- \"Modin.\".format(__pandas_version__)\n+ import warnings\n+\n+ warnings.warn(\n+ \"The pandas version installed does not match the required pandas version in \"\n+ \"Modin. This may cause undesired side effects!\".format(__pandas_version__)\n )\n \n from pandas import (\n", "issue": "Documentation for 0.7 says that it depends on pandas==0.23.4, but when I import modin it says it requires pandas==0.25\nIn the [modin documentation for 0.7]( https://modin.readthedocs.io/en/latest/installation.html#dependencies), it says that it depends on `pandas==0.23.4`, but when I install `modin==0.7` and try to import it, the following import error is thrown:\r\n\r\n ImportError: The pandas version installed does not match the required pandas version in Modin. Please install pandas 0.25.3 to use Modin.\r\n\r\nIs this an error in the documentation? Is there anyway I can use `modin==0.7` with `pandas==0.23.4` as I am using Dataiku DSS v6.0, which requires `pandas==0.23.4` and cannot be upgraded.\n", "before_files": [{"content": "import pandas\n\n__pandas_version__ = \"0.25.3\"\n\nif pandas.__version__ != __pandas_version__:\n raise ImportError(\n \"The pandas version installed does not match the required pandas \"\n \"version in Modin. Please install pandas {} to use \"\n \"Modin.\".format(__pandas_version__)\n )\n\nfrom pandas import (\n eval,\n unique,\n value_counts,\n cut,\n to_numeric,\n factorize,\n test,\n qcut,\n date_range,\n period_range,\n Index,\n MultiIndex,\n CategoricalIndex,\n bdate_range,\n DatetimeIndex,\n Timedelta,\n Timestamp,\n to_timedelta,\n set_eng_float_format,\n options,\n set_option,\n NaT,\n PeriodIndex,\n Categorical,\n Interval,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n SparseDtype,\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n CategoricalDtype,\n DatetimeTZDtype,\n IntervalDtype,\n PeriodDtype,\n RangeIndex,\n Int64Index,\n UInt64Index,\n Float64Index,\n TimedeltaIndex,\n IntervalIndex,\n IndexSlice,\n Grouper,\n array,\n Period,\n show_versions,\n DateOffset,\n timedelta_range,\n infer_freq,\n interval_range,\n ExcelWriter,\n SparseArray,\n SparseSeries,\n SparseDataFrame,\n datetime,\n NamedAgg,\n)\nimport threading\nimport os\nimport types\nimport sys\n\nfrom .. import __version__\nfrom .concat import concat\nfrom .dataframe import DataFrame\nfrom .datetimes import to_datetime\nfrom .io import (\n read_csv,\n read_parquet,\n read_json,\n read_html,\n read_clipboard,\n read_excel,\n read_hdf,\n read_feather,\n read_msgpack,\n read_stata,\n read_sas,\n read_pickle,\n read_sql,\n read_gbq,\n read_table,\n read_fwf,\n read_sql_table,\n read_sql_query,\n read_spss,\n ExcelFile,\n to_pickle,\n HDFStore,\n)\nfrom .reshape import get_dummies, melt, crosstab, lreshape, wide_to_long\nfrom .series import Series\nfrom .general import (\n isna,\n isnull,\n merge,\n merge_asof,\n merge_ordered,\n pivot_table,\n notnull,\n notna,\n pivot,\n)\nfrom .plotting import Plotting as plotting\nfrom .. 
import __execution_engine__ as execution_engine\n\n# Set this so that Pandas doesn't try to multithread by itself\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nnum_cpus = 1\n\n\ndef initialize_ray():\n import ray\n\n \"\"\"Initializes ray based on environment variables and internal defaults.\"\"\"\n if threading.current_thread().name == \"MainThread\":\n plasma_directory = None\n cluster = os.environ.get(\"MODIN_RAY_CLUSTER\", None)\n redis_address = os.environ.get(\"MODIN_REDIS_ADDRESS\", None)\n if cluster == \"True\" and redis_address is not None:\n # We only start ray in a cluster setting for the head node.\n ray.init(\n include_webui=False,\n ignore_reinit_error=True,\n redis_address=redis_address,\n logging_level=100,\n )\n elif cluster is None:\n object_store_memory = os.environ.get(\"MODIN_MEMORY\", None)\n if os.environ.get(\"MODIN_OUT_OF_CORE\", \"False\").title() == \"True\":\n from tempfile import gettempdir\n\n plasma_directory = gettempdir()\n # We may have already set the memory from the environment variable, we don't\n # want to overwrite that value if we have.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n mem_bytes = ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n # Default to 8x memory for out of core\n object_store_memory = 8 * mem_bytes\n # In case anything failed above, we can still improve the memory for Modin.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n object_store_memory = int(\n 0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n )\n # If the memory pool is smaller than 2GB, just use the default in ray.\n if object_store_memory == 0:\n object_store_memory = None\n else:\n object_store_memory = int(object_store_memory)\n ray.init(\n include_webui=False,\n ignore_reinit_error=True,\n plasma_directory=plasma_directory,\n object_store_memory=object_store_memory,\n redis_address=redis_address,\n logging_level=100,\n memory=object_store_memory,\n )\n # Register custom serializer for method objects to avoid warning message.\n # We serialize `MethodType` objects when we use AxisPartition operations.\n ray.register_custom_serializer(types.MethodType, use_pickle=True)\n\n # Register a fix import function to run on all_workers including the driver.\n # This is a hack solution to fix #647, #746\n def move_stdlib_ahead_of_site_packages(*args):\n site_packages_path = None\n site_packages_path_index = -1\n for i, path in enumerate(sys.path):\n if sys.exec_prefix in path and path.endswith(\"site-packages\"):\n site_packages_path = path\n site_packages_path_index = i\n # break on first found\n break\n\n if site_packages_path is not None:\n # stdlib packages layout as follows:\n # - python3.x\n # - typing.py\n # - site-packages/\n # - pandas\n # So extracting the dirname of the site_packages can point us\n # to the directory containing standard libraries.\n sys.path.insert(\n site_packages_path_index, os.path.dirname(site_packages_path)\n )\n\n move_stdlib_ahead_of_site_packages()\n ray.worker.global_worker.run_function_on_all_workers(\n move_stdlib_ahead_of_site_packages\n )\n\n\nif execution_engine == \"Ray\":\n import ray\n\n initialize_ray()\n num_cpus = ray.cluster_resources()[\"CPU\"]\nelif execution_engine == \"Dask\": # pragma: no cover\n from distributed.client import _get_global_client\n import warnings\n\n warnings.warn(\"The Dask Engine for Modin is experimental.\")\n\n if threading.current_thread().name == \"MainThread\":\n # initialize the dask client\n client = _get_global_client()\n if client is 
None:\n from distributed import Client\n import multiprocessing\n\n num_cpus = multiprocessing.cpu_count()\n client = Client(n_workers=num_cpus)\nelif execution_engine != \"Python\":\n raise ImportError(\"Unrecognized execution engine: {}.\".format(execution_engine))\n\nDEFAULT_NPARTITIONS = max(4, int(num_cpus))\n\n__all__ = [\n \"DataFrame\",\n \"Series\",\n \"read_csv\",\n \"read_parquet\",\n \"read_json\",\n \"read_html\",\n \"read_clipboard\",\n \"read_excel\",\n \"read_hdf\",\n \"read_feather\",\n \"read_msgpack\",\n \"read_stata\",\n \"read_sas\",\n \"read_pickle\",\n \"read_sql\",\n \"read_gbq\",\n \"read_table\",\n \"read_spss\",\n \"concat\",\n \"eval\",\n \"unique\",\n \"value_counts\",\n \"cut\",\n \"to_numeric\",\n \"factorize\",\n \"test\",\n \"qcut\",\n \"to_datetime\",\n \"get_dummies\",\n \"isna\",\n \"isnull\",\n \"merge\",\n \"pivot_table\",\n \"date_range\",\n \"Index\",\n \"MultiIndex\",\n \"Series\",\n \"bdate_range\",\n \"period_range\",\n \"DatetimeIndex\",\n \"to_timedelta\",\n \"set_eng_float_format\",\n \"options\",\n \"set_option\",\n \"CategoricalIndex\",\n \"Timedelta\",\n \"Timestamp\",\n \"NaT\",\n \"PeriodIndex\",\n \"Categorical\",\n \"__version__\",\n \"melt\",\n \"crosstab\",\n \"plotting\",\n \"Interval\",\n \"UInt8Dtype\",\n \"UInt16Dtype\",\n \"UInt32Dtype\",\n \"UInt64Dtype\",\n \"SparseDtype\",\n \"Int8Dtype\",\n \"Int16Dtype\",\n \"Int32Dtype\",\n \"Int64Dtype\",\n \"CategoricalDtype\",\n \"DatetimeTZDtype\",\n \"IntervalDtype\",\n \"PeriodDtype\",\n \"RangeIndex\",\n \"Int64Index\",\n \"UInt64Index\",\n \"Float64Index\",\n \"TimedeltaIndex\",\n \"IntervalIndex\",\n \"IndexSlice\",\n \"Grouper\",\n \"array\",\n \"Period\",\n \"show_versions\",\n \"DateOffset\",\n \"timedelta_range\",\n \"infer_freq\",\n \"interval_range\",\n \"ExcelWriter\",\n \"read_fwf\",\n \"read_sql_table\",\n \"read_sql_query\",\n \"ExcelFile\",\n \"to_pickle\",\n \"HDFStore\",\n \"lreshape\",\n \"wide_to_long\",\n \"merge_asof\",\n \"merge_ordered\",\n \"notnull\",\n \"notna\",\n \"pivot\",\n \"SparseArray\",\n \"SparseSeries\",\n \"SparseDataFrame\",\n \"datetime\",\n \"NamedAgg\",\n \"DEFAULT_NPARTITIONS\",\n]\n\ndel pandas\n", "path": "modin/pandas/__init__.py"}], "after_files": [{"content": "import pandas\n\n__pandas_version__ = \"0.25.3\"\n\nif pandas.__version__ != __pandas_version__:\n import warnings\n\n warnings.warn(\n \"The pandas version installed does not match the required pandas version in \"\n \"Modin. This may cause undesired side effects!\".format(__pandas_version__)\n )\n\nfrom pandas import (\n eval,\n unique,\n value_counts,\n cut,\n to_numeric,\n factorize,\n test,\n qcut,\n date_range,\n period_range,\n Index,\n MultiIndex,\n CategoricalIndex,\n bdate_range,\n DatetimeIndex,\n Timedelta,\n Timestamp,\n to_timedelta,\n set_eng_float_format,\n options,\n set_option,\n NaT,\n PeriodIndex,\n Categorical,\n Interval,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n SparseDtype,\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n CategoricalDtype,\n DatetimeTZDtype,\n IntervalDtype,\n PeriodDtype,\n RangeIndex,\n Int64Index,\n UInt64Index,\n Float64Index,\n TimedeltaIndex,\n IntervalIndex,\n IndexSlice,\n Grouper,\n array,\n Period,\n show_versions,\n DateOffset,\n timedelta_range,\n infer_freq,\n interval_range,\n ExcelWriter,\n SparseArray,\n SparseSeries,\n SparseDataFrame,\n datetime,\n NamedAgg,\n)\nimport threading\nimport os\nimport types\nimport sys\n\nfrom .. 
import __version__\nfrom .concat import concat\nfrom .dataframe import DataFrame\nfrom .datetimes import to_datetime\nfrom .io import (\n read_csv,\n read_parquet,\n read_json,\n read_html,\n read_clipboard,\n read_excel,\n read_hdf,\n read_feather,\n read_msgpack,\n read_stata,\n read_sas,\n read_pickle,\n read_sql,\n read_gbq,\n read_table,\n read_fwf,\n read_sql_table,\n read_sql_query,\n read_spss,\n ExcelFile,\n to_pickle,\n HDFStore,\n)\nfrom .reshape import get_dummies, melt, crosstab, lreshape, wide_to_long\nfrom .series import Series\nfrom .general import (\n isna,\n isnull,\n merge,\n merge_asof,\n merge_ordered,\n pivot_table,\n notnull,\n notna,\n pivot,\n)\nfrom .plotting import Plotting as plotting\nfrom .. import __execution_engine__ as execution_engine\n\n# Set this so that Pandas doesn't try to multithread by itself\nos.environ[\"OMP_NUM_THREADS\"] = \"1\"\nnum_cpus = 1\n\n\ndef initialize_ray():\n import ray\n\n \"\"\"Initializes ray based on environment variables and internal defaults.\"\"\"\n if threading.current_thread().name == \"MainThread\":\n plasma_directory = None\n cluster = os.environ.get(\"MODIN_RAY_CLUSTER\", None)\n redis_address = os.environ.get(\"MODIN_REDIS_ADDRESS\", None)\n if cluster == \"True\" and redis_address is not None:\n # We only start ray in a cluster setting for the head node.\n ray.init(\n include_webui=False,\n ignore_reinit_error=True,\n redis_address=redis_address,\n logging_level=100,\n )\n elif cluster is None:\n object_store_memory = os.environ.get(\"MODIN_MEMORY\", None)\n if os.environ.get(\"MODIN_OUT_OF_CORE\", \"False\").title() == \"True\":\n from tempfile import gettempdir\n\n plasma_directory = gettempdir()\n # We may have already set the memory from the environment variable, we don't\n # want to overwrite that value if we have.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n mem_bytes = ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n # Default to 8x memory for out of core\n object_store_memory = 8 * mem_bytes\n # In case anything failed above, we can still improve the memory for Modin.\n if object_store_memory is None:\n # Round down to the nearest Gigabyte.\n object_store_memory = int(\n 0.6 * ray.utils.get_system_memory() // 10 ** 9 * 10 ** 9\n )\n # If the memory pool is smaller than 2GB, just use the default in ray.\n if object_store_memory == 0:\n object_store_memory = None\n else:\n object_store_memory = int(object_store_memory)\n ray.init(\n include_webui=False,\n ignore_reinit_error=True,\n plasma_directory=plasma_directory,\n object_store_memory=object_store_memory,\n redis_address=redis_address,\n logging_level=100,\n memory=object_store_memory,\n )\n # Register custom serializer for method objects to avoid warning message.\n # We serialize `MethodType` objects when we use AxisPartition operations.\n ray.register_custom_serializer(types.MethodType, use_pickle=True)\n\n # Register a fix import function to run on all_workers including the driver.\n # This is a hack solution to fix #647, #746\n def move_stdlib_ahead_of_site_packages(*args):\n site_packages_path = None\n site_packages_path_index = -1\n for i, path in enumerate(sys.path):\n if sys.exec_prefix in path and path.endswith(\"site-packages\"):\n site_packages_path = path\n site_packages_path_index = i\n # break on first found\n break\n\n if site_packages_path is not None:\n # stdlib packages layout as follows:\n # - python3.x\n # - typing.py\n # - site-packages/\n # - pandas\n # So extracting the dirname of the site_packages can 
point us\n # to the directory containing standard libraries.\n sys.path.insert(\n site_packages_path_index, os.path.dirname(site_packages_path)\n )\n\n move_stdlib_ahead_of_site_packages()\n ray.worker.global_worker.run_function_on_all_workers(\n move_stdlib_ahead_of_site_packages\n )\n\n\nif execution_engine == \"Ray\":\n import ray\n\n initialize_ray()\n num_cpus = ray.cluster_resources()[\"CPU\"]\nelif execution_engine == \"Dask\": # pragma: no cover\n from distributed.client import _get_global_client\n import warnings\n\n warnings.warn(\"The Dask Engine for Modin is experimental.\")\n\n if threading.current_thread().name == \"MainThread\":\n # initialize the dask client\n client = _get_global_client()\n if client is None:\n from distributed import Client\n import multiprocessing\n\n num_cpus = multiprocessing.cpu_count()\n client = Client(n_workers=num_cpus)\nelif execution_engine != \"Python\":\n raise ImportError(\"Unrecognized execution engine: {}.\".format(execution_engine))\n\nDEFAULT_NPARTITIONS = max(4, int(num_cpus))\n\n__all__ = [\n \"DataFrame\",\n \"Series\",\n \"read_csv\",\n \"read_parquet\",\n \"read_json\",\n \"read_html\",\n \"read_clipboard\",\n \"read_excel\",\n \"read_hdf\",\n \"read_feather\",\n \"read_msgpack\",\n \"read_stata\",\n \"read_sas\",\n \"read_pickle\",\n \"read_sql\",\n \"read_gbq\",\n \"read_table\",\n \"read_spss\",\n \"concat\",\n \"eval\",\n \"unique\",\n \"value_counts\",\n \"cut\",\n \"to_numeric\",\n \"factorize\",\n \"test\",\n \"qcut\",\n \"to_datetime\",\n \"get_dummies\",\n \"isna\",\n \"isnull\",\n \"merge\",\n \"pivot_table\",\n \"date_range\",\n \"Index\",\n \"MultiIndex\",\n \"Series\",\n \"bdate_range\",\n \"period_range\",\n \"DatetimeIndex\",\n \"to_timedelta\",\n \"set_eng_float_format\",\n \"options\",\n \"set_option\",\n \"CategoricalIndex\",\n \"Timedelta\",\n \"Timestamp\",\n \"NaT\",\n \"PeriodIndex\",\n \"Categorical\",\n \"__version__\",\n \"melt\",\n \"crosstab\",\n \"plotting\",\n \"Interval\",\n \"UInt8Dtype\",\n \"UInt16Dtype\",\n \"UInt32Dtype\",\n \"UInt64Dtype\",\n \"SparseDtype\",\n \"Int8Dtype\",\n \"Int16Dtype\",\n \"Int32Dtype\",\n \"Int64Dtype\",\n \"CategoricalDtype\",\n \"DatetimeTZDtype\",\n \"IntervalDtype\",\n \"PeriodDtype\",\n \"RangeIndex\",\n \"Int64Index\",\n \"UInt64Index\",\n \"Float64Index\",\n \"TimedeltaIndex\",\n \"IntervalIndex\",\n \"IndexSlice\",\n \"Grouper\",\n \"array\",\n \"Period\",\n \"show_versions\",\n \"DateOffset\",\n \"timedelta_range\",\n \"infer_freq\",\n \"interval_range\",\n \"ExcelWriter\",\n \"read_fwf\",\n \"read_sql_table\",\n \"read_sql_query\",\n \"ExcelFile\",\n \"to_pickle\",\n \"HDFStore\",\n \"lreshape\",\n \"wide_to_long\",\n \"merge_asof\",\n \"merge_ordered\",\n \"notnull\",\n \"notna\",\n \"pivot\",\n \"SparseArray\",\n \"SparseSeries\",\n \"SparseDataFrame\",\n \"datetime\",\n \"NamedAgg\",\n \"DEFAULT_NPARTITIONS\",\n]\n\ndel pandas\n", "path": "modin/pandas/__init__.py"}]}
| 3,497 | 183 |
gh_patches_debug_22186 | rasdani/github-patches | git_diff | conan-io__conan-center-index-870 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[libsodium] libsodium/1.0.18: Recipe broken on Python < 3.6
The use of f-strings in the Python recipe causes the package to not be installed with a Python version that is less than 3.6. This is a serious problem for using conan with older distributions, such as Ubuntu Xenial or CentOS. Instead of f-strings, the `.format(...)` or `%` interpolation methods should be used.
### Package and Environment Details
* Package Name/Version: **libsodium/1.0.18**
### Conan output
```
ERROR: Error loading conanfile at '~/.conan/data/libsodium/1.0.18/_/_/export/conanfile.py': Unable to load conanfile in ~/.conan/data/libsodium/1.0.18/_/_/export/conanfile.py
File "/usr/lib/python3.5/imp.py", line 172, in load_source
module = _load(spec)
File "<frozen importlib._bootstrap>", line 693, in _load
File "<frozen importlib._bootstrap>", line 673, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 661, in exec_module
File "<frozen importlib._bootstrap_external>", line 767, in get_code
File "<frozen importlib._bootstrap_external>", line 727, in source_to_code
File "<frozen importlib._bootstrap>", line 222, in _call_with_frames_removed
File "~/.conan/data/libsodium/1.0.18/_/_/export/conanfile.py", line 126
raise ConanInvalidConfiguration(f"Unsupported arch or Neutrino version for libsodium: {self.settings.os} {self.settings.arch}")
^
SyntaxError: invalid syntax
```
### Locations in Recipe
```
libsodium/1.0.18/conanfile.py
126: raise ConanInvalidConfiguration(f"Unsupported arch or Neutrino version for libsodium: {self.settings.os} {self.settings.arch}")
148: raise ConanInvalidConfiguration(f"Unsupported os for libsodium: {self.settings.os}")
```
This is as far as I can tell the only package in this repository that uses f-strings.
--- END ISSUE ---
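Editor's note: to make the remedy proposed in the issue concrete, here is a minimal sketch of the two interpolation styles it recommends. The values are invented stand-ins for the recipe's `self.settings` objects; both forms parse on Python 3.5, unlike the f-string quoted above.

```python
# Hypothetical stand-ins for self.settings.os / self.settings.arch in the recipe.
os_name, arch = "Neutrino", "armv7"

msg_format = "Unsupported arch or Neutrino version for libsodium: {} {}".format(os_name, arch)
msg_percent = "Unsupported arch or Neutrino version for libsodium: %s %s" % (os_name, arch)

assert msg_format == msg_percent  # both interpolation styles yield the same message
print(msg_format)
```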
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `recipes/libsodium/1.0.18/conanfile.py`
Content:
```
1 from conans import ConanFile, AutoToolsBuildEnvironment, tools, MSBuild
2 from conans.errors import ConanInvalidConfiguration
3 import os
4
5
6 class LibsodiumConan(ConanFile):
7 name = "libsodium"
8 description = "A modern and easy-to-use crypto library."
9 license = "ISC"
10 url = "https://github.com/conan-io/conan-center-index"
11 homepage = "https://download.libsodium.org/doc/"
12 exports_sources = ["patches/**"]
13 settings = "os", "compiler", "arch", "build_type"
14 topics = ("sodium", "libsodium", "encryption", "signature", "hashing")
15 generators = "cmake"
16 _source_subfolder = "source_subfolder"
17
18 options = {
19 "shared" : [True, False],
20 "fPIC": [True, False],
21 "use_soname" : [True, False],
22 "PIE" : [True, False],
23 }
24
25 default_options = {
26 "shared": False,
27 "fPIC": True,
28 "use_soname": True,
29 "PIE": False,
30 }
31
32 @property
33 def _android_id_str(self):
34 return "androideabi" if str(self.settings.arch) in ["armv6", "armv7"] else "android"
35
36 @property
37 def _is_mingw(self):
38 return self.settings.os == "Windows" and self.settings.compiler == "gcc"
39
40 @property
41 def _vs_configuration(self):
42 configuration = ""
43 if self.options.shared:
44 configuration += "Dyn"
45 else:
46 configuration += "Static"
47 build_type = "Debug" if self.settings.build_type == "Debug" else "Release"
48 configuration += build_type
49 return configuration
50
51 @property
52 def _vs_sln_folder(self):
53 folder = {"14": "vs2015",
54 "15": "vs2017",
55 "16": "vs2019"}.get(str(self.settings.compiler.version), None)
56 if not folder:
57 raise ConanInvalidConfiguration("Unsupported msvc version: {}".format(self.settings.compiler.version))
58 return folder
59
60 def configure(self):
61 del self.settings.compiler.libcxx
62 del self.settings.compiler.cppstd
63
64 def config_options(self):
65 if self.settings.os == "Windows":
66 del self.options.fPIC
67
68 def build_requirements(self):
69 # There are several unix tools used (bash scripts for Emscripten, autoreconf on MinGW, etc...)
70 if self.settings.compiler != "Visual Studio" and tools.os_info.is_windows and \
71 not "CONAN_BASH_PATH" in os.environ and tools.os_info.detect_windows_subsystem() != "Windows":
72 self.build_requires("msys2/20190524")
73
74 def source(self):
75 tools.get(**self.conan_data["sources"][self.version])
76 extracted_dir = self.name + "-" + self.version
77 os.rename(extracted_dir, self._source_subfolder)
78
79 def _build_visual(self):
80 sln_path = os.path.join(self.build_folder, self._source_subfolder, "builds", "msvc", self._vs_sln_folder, "libsodium.sln")
81
82 msbuild = MSBuild(self)
83 msbuild.build(sln_path, upgrade_project=False, platforms={"x86": "Win32"}, build_type=self._vs_configuration)
84
85 def _build_autotools_impl(self, configure_args):
86 win_bash = False
87 if self._is_mingw:
88 win_bash = True
89
90 autotools = AutoToolsBuildEnvironment(self, win_bash=win_bash)
91 if self._is_mingw:
92 self.run("autoreconf -i", cwd=self._source_subfolder, win_bash=win_bash)
93 autotools.configure(args=configure_args, configure_dir=self._source_subfolder, host=False)
94 autotools.make(args=["-j%s" % str(tools.cpu_count())])
95 autotools.install()
96
97 def _build_autotools_linux(self, configure_args):
98 self._build_autotools_impl(configure_args)
99
100 def _build_autotools_emscripten(self, configure_args):
101 self.run("./dist-build/emscripten.sh --standard", cwd=self._source_subfolder)
102
103 def _build_autotools_android(self, configure_args):
104 host_arch = "%s-linux-%s" % (tools.to_android_abi(self.settings.arch), self._android_id_str)
105 configure_args.append("--host=%s" % host_arch)
106 self._build_autotools_impl(configure_args)
107
108 def _build_autotools_mingw(self, configure_args):
109 arch = "i686" if self.settings.arch == "x86" else self.settings.arch
110 host_arch = "%s-w64-mingw32" % arch
111 configure_args.append("--host=%s" % host_arch)
112 self._build_autotools_impl(configure_args)
113
114 def _build_autotools_darwin(self, configure_args):
115 os = "ios" if self.settings.os == "iOS" else "darwin"
116 host_arch = "%s-apple-%s" % (self.settings.arch, os)
117 configure_args.append("--host=%s" % host_arch)
118 self._build_autotools_impl(configure_args)
119
120 def _build_autotools_neutrino(self, configure_args):
121 neutrino_archs = {"x86_64":"x86_64-pc", "x86":"i586-pc", "armv7":"arm-unknown", "armv8": "aarch64-unknown"}
122 if self.settings.os.version == "7.0" and str(self.settings.arch) in neutrino_archs:
123 host_arch = "%s-nto-qnx7.0.0" % neutrino_archs[str(self.settings.arch)]
124 if self.settings.arch == "armv7":
125 host_arch += "eabi"
126 else:
127 raise ConanInvalidConfiguration(f"Unsupported arch or Neutrino version for libsodium: {self.settings.os} {self.settings.arch}")
128 configure_args.append("--host=%s" % host_arch)
129 self._build_autotools_impl(configure_args)
130
131 def _build_autotools(self):
132 absolute_install_dir = os.path.abspath(os.path.join(".", "install"))
133 absolute_install_dir = absolute_install_dir.replace("\\", "/")
134 configure_args = self._get_configure_args(absolute_install_dir)
135
136 if self.settings.os == "Linux":
137 self._build_autotools_linux(configure_args)
138 elif self.settings.os == "Emscripten":
139 self._build_autotools_emscripten(configure_args)
140 elif self.settings.os == "Android":
141 self._build_autotools_android(configure_args)
142 elif tools.is_apple_os(self.settings.os):
143 self._build_autotools_darwin(configure_args)
144 elif self._is_mingw:
145 self._build_autotools_mingw(configure_args)
146 elif self.settings.os == "Neutrino":
147 self._build_autotools_neutrino(configure_args)
148 else:
149 raise ConanInvalidConfiguration(f"Unsupported os for libsodium: {self.settings.os}")
150
151 def build(self):
152 for patch in self.conan_data["patches"][self.version]:
153 tools.patch(**patch)
154 if self.settings.os == "Macos":
155 tools.replace_in_file(os.path.join(self._source_subfolder, "configure"), r"-install_name \$rpath/", "-install_name ")
156 if self.settings.compiler != "Visual Studio":
157 self._build_autotools()
158 else:
159 self._build_visual()
160
161 def package(self):
162 self.copy("*LICENSE", dst="licenses", keep_path=False)
163 if self.settings.compiler == "Visual Studio":
164 self._package_visual()
165 else:
166 self._package_autotools()
167
168 def package_info(self):
169 if self.settings.compiler == "Visual Studio":
170 if not self.options.shared:
171 self.cpp_info.defines = ["SODIUM_STATIC=1"]
172 self.cpp_info.libs = tools.collect_libs(self)
173 if self.settings.os == "Linux":
174 self.cpp_info.system_libs = ["pthread"]
175
176 def _package_autotools(self):
177 if self.settings.os == "Emscripten":
178 prefix = "%s/libsodium-js" % self._source_subfolder
179 else:
180 prefix = "install"
181 lib_folder = os.path.join(prefix, "lib")
182 self.copy("*.h", dst="include", src=os.path.join(prefix, "include"))
183 self.copy("*.a", dst="lib", src=lib_folder)
184 self.copy("*.so*", dst="lib", src=lib_folder, symlinks=True)
185 self.copy("*.dylib", dst="lib", src=lib_folder, symlinks=True)
186
187 def _package_visual(self):
188 self.copy("*.lib", dst="lib", keep_path=False)
189 self.copy("*.dll", dst="bin", keep_path=False)
190 inc_src = os.path.join(self._source_subfolder, "src", self.name, "include")
191 self.copy("*.h", src=inc_src, dst="include", keep_path=True, excludes=("*/private/*"))
192
193 def _autotools_bool_arg(self, arg_base_name, value):
194 prefix = "--enable-" if value else "--disable-"
195 return prefix + arg_base_name
196
197 def _get_configure_args(self, absolute_install_dir):
198 args = [
199 "--prefix=%s" % absolute_install_dir,
200 self._autotools_bool_arg("shared", self.options.shared),
201 self._autotools_bool_arg("static", not self.options.shared),
202 self._autotools_bool_arg("soname-versions", self.options.use_soname),
203 self._autotools_bool_arg("pie", self.options.PIE)
204 ]
205 if self.options.get_safe("fPIC"):
206 args.append("--with-pic")
207 return args
208
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/recipes/libsodium/1.0.18/conanfile.py b/recipes/libsodium/1.0.18/conanfile.py
--- a/recipes/libsodium/1.0.18/conanfile.py
+++ b/recipes/libsodium/1.0.18/conanfile.py
@@ -124,7 +124,7 @@
if self.settings.arch == "armv7":
host_arch += "eabi"
else:
- raise ConanInvalidConfiguration(f"Unsupported arch or Neutrino version for libsodium: {self.settings.os} {self.settings.arch}")
+ raise ConanInvalidConfiguration("Unsupported arch or Neutrino version for libsodium: {} {}".format(self.settings.os, self.settings.arch))
configure_args.append("--host=%s" % host_arch)
self._build_autotools_impl(configure_args)
@@ -146,7 +146,7 @@
elif self.settings.os == "Neutrino":
self._build_autotools_neutrino(configure_args)
else:
- raise ConanInvalidConfiguration(f"Unsupported os for libsodium: {self.settings.os}")
+ raise ConanInvalidConfiguration("Unsupported os for libsodium: {}".format(self.settings.os))
def build(self):
for patch in self.conan_data["patches"][self.version]:
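Editor's aside: a quick, self-contained way to check that the patched lines are syntactically valid on interpreters older than 3.6 is to run them through `ast.parse`. The snippet below is illustrative only and is not part of the recipe or of the diff above.

```python
import ast

patched = (
    'raise ConanInvalidConfiguration('
    '"Unsupported os for libsodium: {}".format(self.settings.os))'
)
ast.parse(patched)  # parses on Python 3.5+; the original f-string form is a SyntaxError on 3.5
print("patched line parses cleanly")
```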
|
{"golden_diff": "diff --git a/recipes/libsodium/1.0.18/conanfile.py b/recipes/libsodium/1.0.18/conanfile.py\n--- a/recipes/libsodium/1.0.18/conanfile.py\n+++ b/recipes/libsodium/1.0.18/conanfile.py\n@@ -124,7 +124,7 @@\n if self.settings.arch == \"armv7\":\n host_arch += \"eabi\"\n else:\n- raise ConanInvalidConfiguration(f\"Unsupported arch or Neutrino version for libsodium: {self.settings.os} {self.settings.arch}\")\n+ raise ConanInvalidConfiguration(\"Unsupported arch or Neutrino version for libsodium: {} {}\".format(self.settings.os, self.settings.arch))\n configure_args.append(\"--host=%s\" % host_arch)\n self._build_autotools_impl(configure_args)\n \n@@ -146,7 +146,7 @@\n elif self.settings.os == \"Neutrino\":\n self._build_autotools_neutrino(configure_args)\n else:\n- raise ConanInvalidConfiguration(f\"Unsupported os for libsodium: {self.settings.os}\")\n+ raise ConanInvalidConfiguration(\"Unsupported os for libsodium: {}\".format(self.settings.os))\n \n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n", "issue": "[libsodium] libsodium/1.0.18: Recipe broken on Python < 3.6\nThe use of f-strings in the Python recipe causes the package to not be installed with a Python version that is less than 3.6. This is a serious problem for using conan with older distributions, such as Ubuntu Xenial or CentOS. Instead of f-strings, the `.format(...)` or `%` interpolation methods should be used.\r\n\r\n### Package and Environment Details\r\n * Package Name/Version: **libsodium/1.0.18**\r\n\r\n### Conan output\r\n```\r\nERROR: Error loading conanfile at '~/.conan/data/libsodium/1.0.18/_/_/export/conanfile.py': Unable to load conanfile in ~/.conan/data/libsodium/1.0.18/_/_/export/conanfile.py\r\n File \"/usr/lib/python3.5/imp.py\", line 172, in load_source\r\n module = _load(spec)\r\n File \"<frozen importlib._bootstrap>\", line 693, in _load\r\n File \"<frozen importlib._bootstrap>\", line 673, in _load_unlocked\r\n File \"<frozen importlib._bootstrap_external>\", line 661, in exec_module\r\n File \"<frozen importlib._bootstrap_external>\", line 767, in get_code\r\n File \"<frozen importlib._bootstrap_external>\", line 727, in source_to_code\r\n File \"<frozen importlib._bootstrap>\", line 222, in _call_with_frames_removed\r\n File \"~/.conan/data/libsodium/1.0.18/_/_/export/conanfile.py\", line 126\r\n raise ConanInvalidConfiguration(f\"Unsupported arch or Neutrino version for libsodium: {self.settings.os} {self.settings.arch}\")\r\n ^\r\nSyntaxError: invalid syntax\r\n```\r\n\r\n### Locations in Recipe\r\n```\r\nlibsodium/1.0.18/conanfile.py\r\n126: raise ConanInvalidConfiguration(f\"Unsupported arch or Neutrino version for libsodium: {self.settings.os} {self.settings.arch}\")\r\n148: raise ConanInvalidConfiguration(f\"Unsupported os for libsodium: {self.settings.os}\")\r\n```\r\n\r\nThis is as far as I can tell the only package in this repository that uses f-strings.\n", "before_files": [{"content": "from conans import ConanFile, AutoToolsBuildEnvironment, tools, MSBuild\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\n\nclass LibsodiumConan(ConanFile):\n name = \"libsodium\"\n description = \"A modern and easy-to-use crypto library.\"\n license = \"ISC\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://download.libsodium.org/doc/\"\n exports_sources = [\"patches/**\"]\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n topics = (\"sodium\", \"libsodium\", \"encryption\", \"signature\", \"hashing\")\n 
generators = \"cmake\"\n _source_subfolder = \"source_subfolder\"\n\n options = {\n \"shared\" : [True, False],\n \"fPIC\": [True, False],\n \"use_soname\" : [True, False],\n \"PIE\" : [True, False],\n }\n\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"use_soname\": True,\n \"PIE\": False,\n }\n\n @property\n def _android_id_str(self):\n return \"androideabi\" if str(self.settings.arch) in [\"armv6\", \"armv7\"] else \"android\"\n\n @property\n def _is_mingw(self):\n return self.settings.os == \"Windows\" and self.settings.compiler == \"gcc\"\n\n @property\n def _vs_configuration(self):\n configuration = \"\"\n if self.options.shared:\n configuration += \"Dyn\"\n else:\n configuration += \"Static\"\n build_type = \"Debug\" if self.settings.build_type == \"Debug\" else \"Release\"\n configuration += build_type\n return configuration\n\n @property\n def _vs_sln_folder(self):\n folder = {\"14\": \"vs2015\",\n \"15\": \"vs2017\",\n \"16\": \"vs2019\"}.get(str(self.settings.compiler.version), None)\n if not folder:\n raise ConanInvalidConfiguration(\"Unsupported msvc version: {}\".format(self.settings.compiler.version))\n return folder\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def build_requirements(self):\n # There are several unix tools used (bash scripts for Emscripten, autoreconf on MinGW, etc...)\n if self.settings.compiler != \"Visual Studio\" and tools.os_info.is_windows and \\\n not \"CONAN_BASH_PATH\" in os.environ and tools.os_info.detect_windows_subsystem() != \"Windows\":\n self.build_requires(\"msys2/20190524\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _build_visual(self):\n sln_path = os.path.join(self.build_folder, self._source_subfolder, \"builds\", \"msvc\", self._vs_sln_folder, \"libsodium.sln\")\n\n msbuild = MSBuild(self)\n msbuild.build(sln_path, upgrade_project=False, platforms={\"x86\": \"Win32\"}, build_type=self._vs_configuration)\n\n def _build_autotools_impl(self, configure_args):\n win_bash = False\n if self._is_mingw:\n win_bash = True\n\n autotools = AutoToolsBuildEnvironment(self, win_bash=win_bash)\n if self._is_mingw:\n self.run(\"autoreconf -i\", cwd=self._source_subfolder, win_bash=win_bash)\n autotools.configure(args=configure_args, configure_dir=self._source_subfolder, host=False)\n autotools.make(args=[\"-j%s\" % str(tools.cpu_count())])\n autotools.install()\n\n def _build_autotools_linux(self, configure_args):\n self._build_autotools_impl(configure_args)\n\n def _build_autotools_emscripten(self, configure_args):\n self.run(\"./dist-build/emscripten.sh --standard\", cwd=self._source_subfolder)\n\n def _build_autotools_android(self, configure_args):\n host_arch = \"%s-linux-%s\" % (tools.to_android_abi(self.settings.arch), self._android_id_str)\n configure_args.append(\"--host=%s\" % host_arch)\n self._build_autotools_impl(configure_args)\n\n def _build_autotools_mingw(self, configure_args):\n arch = \"i686\" if self.settings.arch == \"x86\" else self.settings.arch\n host_arch = \"%s-w64-mingw32\" % arch\n configure_args.append(\"--host=%s\" % host_arch)\n self._build_autotools_impl(configure_args)\n\n def _build_autotools_darwin(self, configure_args):\n os = \"ios\" if self.settings.os == \"iOS\" else \"darwin\"\n host_arch = \"%s-apple-%s\" % 
(self.settings.arch, os)\n configure_args.append(\"--host=%s\" % host_arch)\n self._build_autotools_impl(configure_args)\n\n def _build_autotools_neutrino(self, configure_args):\n neutrino_archs = {\"x86_64\":\"x86_64-pc\", \"x86\":\"i586-pc\", \"armv7\":\"arm-unknown\", \"armv8\": \"aarch64-unknown\"}\n if self.settings.os.version == \"7.0\" and str(self.settings.arch) in neutrino_archs:\n host_arch = \"%s-nto-qnx7.0.0\" % neutrino_archs[str(self.settings.arch)]\n if self.settings.arch == \"armv7\":\n host_arch += \"eabi\"\n else:\n raise ConanInvalidConfiguration(f\"Unsupported arch or Neutrino version for libsodium: {self.settings.os} {self.settings.arch}\")\n configure_args.append(\"--host=%s\" % host_arch)\n self._build_autotools_impl(configure_args)\n\n def _build_autotools(self):\n absolute_install_dir = os.path.abspath(os.path.join(\".\", \"install\"))\n absolute_install_dir = absolute_install_dir.replace(\"\\\\\", \"/\")\n configure_args = self._get_configure_args(absolute_install_dir)\n\n if self.settings.os == \"Linux\":\n self._build_autotools_linux(configure_args)\n elif self.settings.os == \"Emscripten\":\n self._build_autotools_emscripten(configure_args)\n elif self.settings.os == \"Android\":\n self._build_autotools_android(configure_args)\n elif tools.is_apple_os(self.settings.os):\n self._build_autotools_darwin(configure_args)\n elif self._is_mingw:\n self._build_autotools_mingw(configure_args)\n elif self.settings.os == \"Neutrino\":\n self._build_autotools_neutrino(configure_args)\n else:\n raise ConanInvalidConfiguration(f\"Unsupported os for libsodium: {self.settings.os}\")\n\n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n if self.settings.os == \"Macos\":\n tools.replace_in_file(os.path.join(self._source_subfolder, \"configure\"), r\"-install_name \\$rpath/\", \"-install_name \")\n if self.settings.compiler != \"Visual Studio\":\n self._build_autotools()\n else:\n self._build_visual()\n\n def package(self):\n self.copy(\"*LICENSE\", dst=\"licenses\", keep_path=False)\n if self.settings.compiler == \"Visual Studio\":\n self._package_visual()\n else:\n self._package_autotools()\n\n def package_info(self):\n if self.settings.compiler == \"Visual Studio\":\n if not self.options.shared:\n self.cpp_info.defines = [\"SODIUM_STATIC=1\"]\n self.cpp_info.libs = tools.collect_libs(self)\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs = [\"pthread\"]\n\n def _package_autotools(self):\n if self.settings.os == \"Emscripten\":\n prefix = \"%s/libsodium-js\" % self._source_subfolder\n else:\n prefix = \"install\"\n lib_folder = os.path.join(prefix, \"lib\")\n self.copy(\"*.h\", dst=\"include\", src=os.path.join(prefix, \"include\"))\n self.copy(\"*.a\", dst=\"lib\", src=lib_folder)\n self.copy(\"*.so*\", dst=\"lib\", src=lib_folder, symlinks=True)\n self.copy(\"*.dylib\", dst=\"lib\", src=lib_folder, symlinks=True)\n\n def _package_visual(self):\n self.copy(\"*.lib\", dst=\"lib\", keep_path=False)\n self.copy(\"*.dll\", dst=\"bin\", keep_path=False)\n inc_src = os.path.join(self._source_subfolder, \"src\", self.name, \"include\")\n self.copy(\"*.h\", src=inc_src, dst=\"include\", keep_path=True, excludes=(\"*/private/*\"))\n\n def _autotools_bool_arg(self, arg_base_name, value):\n prefix = \"--enable-\" if value else \"--disable-\"\n return prefix + arg_base_name\n\n def _get_configure_args(self, absolute_install_dir):\n args = [\n \"--prefix=%s\" % absolute_install_dir,\n self._autotools_bool_arg(\"shared\", 
self.options.shared),\n self._autotools_bool_arg(\"static\", not self.options.shared),\n self._autotools_bool_arg(\"soname-versions\", self.options.use_soname),\n self._autotools_bool_arg(\"pie\", self.options.PIE)\n ]\n if self.options.get_safe(\"fPIC\"):\n args.append(\"--with-pic\")\n return args\n", "path": "recipes/libsodium/1.0.18/conanfile.py"}], "after_files": [{"content": "from conans import ConanFile, AutoToolsBuildEnvironment, tools, MSBuild\nfrom conans.errors import ConanInvalidConfiguration\nimport os\n\n\nclass LibsodiumConan(ConanFile):\n name = \"libsodium\"\n description = \"A modern and easy-to-use crypto library.\"\n license = \"ISC\"\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://download.libsodium.org/doc/\"\n exports_sources = [\"patches/**\"]\n settings = \"os\", \"compiler\", \"arch\", \"build_type\"\n topics = (\"sodium\", \"libsodium\", \"encryption\", \"signature\", \"hashing\")\n generators = \"cmake\"\n _source_subfolder = \"source_subfolder\"\n\n options = {\n \"shared\" : [True, False],\n \"fPIC\": [True, False],\n \"use_soname\" : [True, False],\n \"PIE\" : [True, False],\n }\n\n default_options = {\n \"shared\": False,\n \"fPIC\": True,\n \"use_soname\": True,\n \"PIE\": False,\n }\n\n @property\n def _android_id_str(self):\n return \"androideabi\" if str(self.settings.arch) in [\"armv6\", \"armv7\"] else \"android\"\n\n @property\n def _is_mingw(self):\n return self.settings.os == \"Windows\" and self.settings.compiler == \"gcc\"\n\n @property\n def _vs_configuration(self):\n configuration = \"\"\n if self.options.shared:\n configuration += \"Dyn\"\n else:\n configuration += \"Static\"\n build_type = \"Debug\" if self.settings.build_type == \"Debug\" else \"Release\"\n configuration += build_type\n return configuration\n\n @property\n def _vs_sln_folder(self):\n folder = {\"14\": \"vs2015\",\n \"15\": \"vs2017\",\n \"16\": \"vs2019\"}.get(str(self.settings.compiler.version), None)\n if not folder:\n raise ConanInvalidConfiguration(\"Unsupported msvc version: {}\".format(self.settings.compiler.version))\n return folder\n\n def configure(self):\n del self.settings.compiler.libcxx\n del self.settings.compiler.cppstd\n\n def config_options(self):\n if self.settings.os == \"Windows\":\n del self.options.fPIC\n\n def build_requirements(self):\n # There are several unix tools used (bash scripts for Emscripten, autoreconf on MinGW, etc...)\n if self.settings.compiler != \"Visual Studio\" and tools.os_info.is_windows and \\\n not \"CONAN_BASH_PATH\" in os.environ and tools.os_info.detect_windows_subsystem() != \"Windows\":\n self.build_requires(\"msys2/20190524\")\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = self.name + \"-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def _build_visual(self):\n sln_path = os.path.join(self.build_folder, self._source_subfolder, \"builds\", \"msvc\", self._vs_sln_folder, \"libsodium.sln\")\n\n msbuild = MSBuild(self)\n msbuild.build(sln_path, upgrade_project=False, platforms={\"x86\": \"Win32\"}, build_type=self._vs_configuration)\n\n def _build_autotools_impl(self, configure_args):\n win_bash = False\n if self._is_mingw:\n win_bash = True\n\n autotools = AutoToolsBuildEnvironment(self, win_bash=win_bash)\n if self._is_mingw:\n self.run(\"autoreconf -i\", cwd=self._source_subfolder, win_bash=win_bash)\n autotools.configure(args=configure_args, configure_dir=self._source_subfolder, host=False)\n 
autotools.make(args=[\"-j%s\" % str(tools.cpu_count())])\n autotools.install()\n\n def _build_autotools_linux(self, configure_args):\n self._build_autotools_impl(configure_args)\n\n def _build_autotools_emscripten(self, configure_args):\n self.run(\"./dist-build/emscripten.sh --standard\", cwd=self._source_subfolder)\n\n def _build_autotools_android(self, configure_args):\n host_arch = \"%s-linux-%s\" % (tools.to_android_abi(self.settings.arch), self._android_id_str)\n configure_args.append(\"--host=%s\" % host_arch)\n self._build_autotools_impl(configure_args)\n\n def _build_autotools_mingw(self, configure_args):\n arch = \"i686\" if self.settings.arch == \"x86\" else self.settings.arch\n host_arch = \"%s-w64-mingw32\" % arch\n configure_args.append(\"--host=%s\" % host_arch)\n self._build_autotools_impl(configure_args)\n\n def _build_autotools_darwin(self, configure_args):\n os = \"ios\" if self.settings.os == \"iOS\" else \"darwin\"\n host_arch = \"%s-apple-%s\" % (self.settings.arch, os)\n configure_args.append(\"--host=%s\" % host_arch)\n self._build_autotools_impl(configure_args)\n\n def _build_autotools_neutrino(self, configure_args):\n neutrino_archs = {\"x86_64\":\"x86_64-pc\", \"x86\":\"i586-pc\", \"armv7\":\"arm-unknown\", \"armv8\": \"aarch64-unknown\"}\n if self.settings.os.version == \"7.0\" and str(self.settings.arch) in neutrino_archs:\n host_arch = \"%s-nto-qnx7.0.0\" % neutrino_archs[str(self.settings.arch)]\n if self.settings.arch == \"armv7\":\n host_arch += \"eabi\"\n else:\n raise ConanInvalidConfiguration(\"Unsupported arch or Neutrino version for libsodium: {} {}\".format(self.settings.os, self.settings.arch))\n configure_args.append(\"--host=%s\" % host_arch)\n self._build_autotools_impl(configure_args)\n\n def _build_autotools(self):\n absolute_install_dir = os.path.abspath(os.path.join(\".\", \"install\"))\n absolute_install_dir = absolute_install_dir.replace(\"\\\\\", \"/\")\n configure_args = self._get_configure_args(absolute_install_dir)\n\n if self.settings.os == \"Linux\":\n self._build_autotools_linux(configure_args)\n elif self.settings.os == \"Emscripten\":\n self._build_autotools_emscripten(configure_args)\n elif self.settings.os == \"Android\":\n self._build_autotools_android(configure_args)\n elif tools.is_apple_os(self.settings.os):\n self._build_autotools_darwin(configure_args)\n elif self._is_mingw:\n self._build_autotools_mingw(configure_args)\n elif self.settings.os == \"Neutrino\":\n self._build_autotools_neutrino(configure_args)\n else:\n raise ConanInvalidConfiguration(\"Unsupported os for libsodium: {}\".format(self.settings.os))\n\n def build(self):\n for patch in self.conan_data[\"patches\"][self.version]:\n tools.patch(**patch)\n if self.settings.os == \"Macos\":\n tools.replace_in_file(os.path.join(self._source_subfolder, \"configure\"), r\"-install_name \\$rpath/\", \"-install_name \")\n if self.settings.compiler != \"Visual Studio\":\n self._build_autotools()\n else:\n self._build_visual()\n\n def package(self):\n self.copy(\"*LICENSE\", dst=\"licenses\", keep_path=False)\n if self.settings.compiler == \"Visual Studio\":\n self._package_visual()\n else:\n self._package_autotools()\n\n def package_info(self):\n if self.settings.compiler == \"Visual Studio\":\n if not self.options.shared:\n self.cpp_info.defines = [\"SODIUM_STATIC=1\"]\n self.cpp_info.libs = tools.collect_libs(self)\n if self.settings.os == \"Linux\":\n self.cpp_info.system_libs = [\"pthread\"]\n\n def _package_autotools(self):\n if self.settings.os == \"Emscripten\":\n 
prefix = \"%s/libsodium-js\" % self._source_subfolder\n else:\n prefix = \"install\"\n lib_folder = os.path.join(prefix, \"lib\")\n self.copy(\"*.h\", dst=\"include\", src=os.path.join(prefix, \"include\"))\n self.copy(\"*.a\", dst=\"lib\", src=lib_folder)\n self.copy(\"*.so*\", dst=\"lib\", src=lib_folder, symlinks=True)\n self.copy(\"*.dylib\", dst=\"lib\", src=lib_folder, symlinks=True)\n\n def _package_visual(self):\n self.copy(\"*.lib\", dst=\"lib\", keep_path=False)\n self.copy(\"*.dll\", dst=\"bin\", keep_path=False)\n inc_src = os.path.join(self._source_subfolder, \"src\", self.name, \"include\")\n self.copy(\"*.h\", src=inc_src, dst=\"include\", keep_path=True, excludes=(\"*/private/*\"))\n\n def _autotools_bool_arg(self, arg_base_name, value):\n prefix = \"--enable-\" if value else \"--disable-\"\n return prefix + arg_base_name\n\n def _get_configure_args(self, absolute_install_dir):\n args = [\n \"--prefix=%s\" % absolute_install_dir,\n self._autotools_bool_arg(\"shared\", self.options.shared),\n self._autotools_bool_arg(\"static\", not self.options.shared),\n self._autotools_bool_arg(\"soname-versions\", self.options.use_soname),\n self._autotools_bool_arg(\"pie\", self.options.PIE)\n ]\n if self.options.get_safe(\"fPIC\"):\n args.append(\"--with-pic\")\n return args\n", "path": "recipes/libsodium/1.0.18/conanfile.py"}]}
| 3,480 | 292 |
gh_patches_debug_921 | rasdani/github-patches | git_diff | tensorflow__addons-1941 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Usage with tf.keras API
https://github.com/tensorflow/addons/blob/5f618fdb92d9737da059de2a33fa606e97505398/tensorflow_addons/losses/focal_loss.py#L52-L53
The usage in `tf.keras` API example is incorrect. It should be replaced with:
```python
model = tf.keras.Model(inputs, outputs)
model.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy())
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `tensorflow_addons/losses/focal_loss.py`
Content:
```
1 # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 # ==============================================================================
15 """Implements Focal loss."""
16
17 import tensorflow as tf
18 import tensorflow.keras.backend as K
19
20 from tensorflow_addons.utils.keras_utils import LossFunctionWrapper
21 from tensorflow_addons.utils.types import FloatTensorLike, TensorLike
22 from typeguard import typechecked
23
24
25 @tf.keras.utils.register_keras_serializable(package="Addons")
26 class SigmoidFocalCrossEntropy(LossFunctionWrapper):
27 """Implements the focal loss function.
28
29 Focal loss was first introduced in the RetinaNet paper
30 (https://arxiv.org/pdf/1708.02002.pdf). Focal loss is extremely useful for
31 classification when you have highly imbalanced classes. It down-weights
32 well-classified examples and focuses on hard examples. The loss value is
33 much high for a sample which is misclassified by the classifier as compared
34 to the loss value corresponding to a well-classified example. One of the
35 best use-cases of focal loss is its usage in object detection where the
36 imbalance between the background class and other classes is extremely high.
37
38 Usage:
39
40 ```python
41 fl = tfa.losses.SigmoidFocalCrossEntropy()
42 loss = fl(
43 y_true = [[1.0], [1.0], [0.0]],
44 y_pred = [[0.97], [0.91], [0.03]])
45 print('Loss: ', loss.numpy()) # Loss: [6.8532745e-06,
46 1.9097870e-04,
47 2.0559824e-05]
48 ```
49 Usage with tf.keras API:
50
51 ```python
52 model = tf.keras.Model(inputs, outputs)
53 model.compile('sgd', loss=tf.keras.losses.SigmoidFocalCrossEntropy())
54 ```
55
56 Args
57 alpha: balancing factor, default value is 0.25
58 gamma: modulating factor, default value is 2.0
59
60 Returns:
61 Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
62 shape as `y_true`; otherwise, it is scalar.
63
64 Raises:
65 ValueError: If the shape of `sample_weight` is invalid or value of
66 `gamma` is less than zero
67 """
68
69 @typechecked
70 def __init__(
71 self,
72 from_logits: bool = False,
73 alpha: FloatTensorLike = 0.25,
74 gamma: FloatTensorLike = 2.0,
75 reduction: str = tf.keras.losses.Reduction.NONE,
76 name: str = "sigmoid_focal_crossentropy",
77 ):
78 super().__init__(
79 sigmoid_focal_crossentropy,
80 name=name,
81 reduction=reduction,
82 from_logits=from_logits,
83 alpha=alpha,
84 gamma=gamma,
85 )
86
87
88 @tf.keras.utils.register_keras_serializable(package="Addons")
89 @tf.function
90 def sigmoid_focal_crossentropy(
91 y_true: TensorLike,
92 y_pred: TensorLike,
93 alpha: FloatTensorLike = 0.25,
94 gamma: FloatTensorLike = 2.0,
95 from_logits: bool = False,
96 ) -> tf.Tensor:
97 """
98 Args
99 y_true: true targets tensor.
100 y_pred: predictions tensor.
101 alpha: balancing factor.
102 gamma: modulating factor.
103
104 Returns:
105 Weighted loss float `Tensor`. If `reduction` is `NONE`,this has the
106 same shape as `y_true`; otherwise, it is scalar.
107 """
108 if gamma and gamma < 0:
109 raise ValueError("Value of gamma should be greater than or equal to zero")
110
111 y_pred = tf.convert_to_tensor(y_pred)
112 y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)
113
114 # Get the cross_entropy for each entry
115 ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)
116
117 # If logits are provided then convert the predictions into probabilities
118 if from_logits:
119 pred_prob = tf.sigmoid(y_pred)
120 else:
121 pred_prob = y_pred
122
123 p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))
124 alpha_factor = 1.0
125 modulating_factor = 1.0
126
127 if alpha:
128 alpha = tf.convert_to_tensor(alpha, dtype=K.floatx())
129 alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
130
131 if gamma:
132 gamma = tf.convert_to_tensor(gamma, dtype=K.floatx())
133 modulating_factor = tf.pow((1.0 - p_t), gamma)
134
135 # compute the final loss and return
136 return tf.reduce_sum(alpha_factor * modulating_factor * ce, axis=-1)
137
```
--- END FILES ---
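Editor's note: the loss values quoted in the docstring can be reproduced by hand from the formula in `sigmoid_focal_crossentropy`. The standalone NumPy sketch below re-implements that formula purely for illustration (it assumes the defaults `alpha=0.25`, `gamma=2.0` and is not part of the library).

```python
import numpy as np

def sigmoid_focal_ce(y_true, y_pred, alpha=0.25, gamma=2.0):
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    ce = -(y_true * np.log(y_pred) + (1 - y_true) * np.log(1 - y_pred))  # binary cross-entropy
    p_t = y_true * y_pred + (1 - y_true) * (1 - y_pred)
    alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
    return np.sum(alpha_factor * (1.0 - p_t) ** gamma * ce, axis=-1)

print(sigmoid_focal_ce([[1.0], [1.0], [0.0]], [[0.97], [0.91], [0.03]]))
# approximately [6.85e-06 1.91e-04 2.06e-05], matching the docstring example
```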
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/tensorflow_addons/losses/focal_loss.py b/tensorflow_addons/losses/focal_loss.py
--- a/tensorflow_addons/losses/focal_loss.py
+++ b/tensorflow_addons/losses/focal_loss.py
@@ -50,7 +50,7 @@
```python
model = tf.keras.Model(inputs, outputs)
- model.compile('sgd', loss=tf.keras.losses.SigmoidFocalCrossEntropy())
+ model.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy())
```
Args
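Editor's note: for readers who want to try the corrected docstring usage end to end, a minimal sketch follows. The layer sizes and optimizer are invented for illustration, and it assumes `tensorflow` and `tensorflow_addons` are installed.

```python
import tensorflow as tf
import tensorflow_addons as tfa

# Toy binary classifier; the input width of 8 is arbitrary.
inputs = tf.keras.Input(shape=(8,))
outputs = tf.keras.layers.Dense(1, activation="sigmoid")(inputs)
model = tf.keras.Model(inputs, outputs)

# The corrected call: SigmoidFocalCrossEntropy lives in tfa.losses, not tf.keras.losses.
model.compile("sgd", loss=tfa.losses.SigmoidFocalCrossEntropy())
```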
|
{"golden_diff": "diff --git a/tensorflow_addons/losses/focal_loss.py b/tensorflow_addons/losses/focal_loss.py\n--- a/tensorflow_addons/losses/focal_loss.py\n+++ b/tensorflow_addons/losses/focal_loss.py\n@@ -50,7 +50,7 @@\n \n ```python\n model = tf.keras.Model(inputs, outputs)\n- model.compile('sgd', loss=tf.keras.losses.SigmoidFocalCrossEntropy())\n+ model.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy())\n ```\n \n Args\n", "issue": "Usage with tf.keras API\nhttps://github.com/tensorflow/addons/blob/5f618fdb92d9737da059de2a33fa606e97505398/tensorflow_addons/losses/focal_loss.py#L52-L53\r\n\r\nThe usage in `tf.keras` API example is incorrect. It should be replaced with:\r\n\r\n```python\r\nmodel = tf.keras.Model(inputs, outputs)\r\nmodel.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy())\r\n```\n", "before_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements Focal loss.\"\"\"\n\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\n\nfrom tensorflow_addons.utils.keras_utils import LossFunctionWrapper\nfrom tensorflow_addons.utils.types import FloatTensorLike, TensorLike\nfrom typeguard import typechecked\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass SigmoidFocalCrossEntropy(LossFunctionWrapper):\n \"\"\"Implements the focal loss function.\n\n Focal loss was first introduced in the RetinaNet paper\n (https://arxiv.org/pdf/1708.02002.pdf). Focal loss is extremely useful for\n classification when you have highly imbalanced classes. It down-weights\n well-classified examples and focuses on hard examples. The loss value is\n much high for a sample which is misclassified by the classifier as compared\n to the loss value corresponding to a well-classified example. One of the\n best use-cases of focal loss is its usage in object detection where the\n imbalance between the background class and other classes is extremely high.\n\n Usage:\n\n ```python\n fl = tfa.losses.SigmoidFocalCrossEntropy()\n loss = fl(\n y_true = [[1.0], [1.0], [0.0]],\n y_pred = [[0.97], [0.91], [0.03]])\n print('Loss: ', loss.numpy()) # Loss: [6.8532745e-06,\n 1.9097870e-04,\n 2.0559824e-05]\n ```\n Usage with tf.keras API:\n\n ```python\n model = tf.keras.Model(inputs, outputs)\n model.compile('sgd', loss=tf.keras.losses.SigmoidFocalCrossEntropy())\n ```\n\n Args\n alpha: balancing factor, default value is 0.25\n gamma: modulating factor, default value is 2.0\n\n Returns:\n Weighted loss float `Tensor`. 
If `reduction` is `NONE`, this has the same\n shape as `y_true`; otherwise, it is scalar.\n\n Raises:\n ValueError: If the shape of `sample_weight` is invalid or value of\n `gamma` is less than zero\n \"\"\"\n\n @typechecked\n def __init__(\n self,\n from_logits: bool = False,\n alpha: FloatTensorLike = 0.25,\n gamma: FloatTensorLike = 2.0,\n reduction: str = tf.keras.losses.Reduction.NONE,\n name: str = \"sigmoid_focal_crossentropy\",\n ):\n super().__init__(\n sigmoid_focal_crossentropy,\n name=name,\n reduction=reduction,\n from_logits=from_logits,\n alpha=alpha,\n gamma=gamma,\n )\n\n\[email protected]_keras_serializable(package=\"Addons\")\[email protected]\ndef sigmoid_focal_crossentropy(\n y_true: TensorLike,\n y_pred: TensorLike,\n alpha: FloatTensorLike = 0.25,\n gamma: FloatTensorLike = 2.0,\n from_logits: bool = False,\n) -> tf.Tensor:\n \"\"\"\n Args\n y_true: true targets tensor.\n y_pred: predictions tensor.\n alpha: balancing factor.\n gamma: modulating factor.\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`,this has the\n same shape as `y_true`; otherwise, it is scalar.\n \"\"\"\n if gamma and gamma < 0:\n raise ValueError(\"Value of gamma should be greater than or equal to zero\")\n\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)\n\n # Get the cross_entropy for each entry\n ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)\n\n # If logits are provided then convert the predictions into probabilities\n if from_logits:\n pred_prob = tf.sigmoid(y_pred)\n else:\n pred_prob = y_pred\n\n p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))\n alpha_factor = 1.0\n modulating_factor = 1.0\n\n if alpha:\n alpha = tf.convert_to_tensor(alpha, dtype=K.floatx())\n alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)\n\n if gamma:\n gamma = tf.convert_to_tensor(gamma, dtype=K.floatx())\n modulating_factor = tf.pow((1.0 - p_t), gamma)\n\n # compute the final loss and return\n return tf.reduce_sum(alpha_factor * modulating_factor * ce, axis=-1)\n", "path": "tensorflow_addons/losses/focal_loss.py"}], "after_files": [{"content": "# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Implements Focal loss.\"\"\"\n\nimport tensorflow as tf\nimport tensorflow.keras.backend as K\n\nfrom tensorflow_addons.utils.keras_utils import LossFunctionWrapper\nfrom tensorflow_addons.utils.types import FloatTensorLike, TensorLike\nfrom typeguard import typechecked\n\n\[email protected]_keras_serializable(package=\"Addons\")\nclass SigmoidFocalCrossEntropy(LossFunctionWrapper):\n \"\"\"Implements the focal loss function.\n\n Focal loss was first introduced in the RetinaNet paper\n (https://arxiv.org/pdf/1708.02002.pdf). Focal loss is extremely useful for\n classification when you have highly imbalanced classes. 
It down-weights\n well-classified examples and focuses on hard examples. The loss value is\n much high for a sample which is misclassified by the classifier as compared\n to the loss value corresponding to a well-classified example. One of the\n best use-cases of focal loss is its usage in object detection where the\n imbalance between the background class and other classes is extremely high.\n\n Usage:\n\n ```python\n fl = tfa.losses.SigmoidFocalCrossEntropy()\n loss = fl(\n y_true = [[1.0], [1.0], [0.0]],\n y_pred = [[0.97], [0.91], [0.03]])\n print('Loss: ', loss.numpy()) # Loss: [6.8532745e-06,\n 1.9097870e-04,\n 2.0559824e-05]\n ```\n Usage with tf.keras API:\n\n ```python\n model = tf.keras.Model(inputs, outputs)\n model.compile('sgd', loss=tfa.losses.SigmoidFocalCrossEntropy())\n ```\n\n Args\n alpha: balancing factor, default value is 0.25\n gamma: modulating factor, default value is 2.0\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same\n shape as `y_true`; otherwise, it is scalar.\n\n Raises:\n ValueError: If the shape of `sample_weight` is invalid or value of\n `gamma` is less than zero\n \"\"\"\n\n @typechecked\n def __init__(\n self,\n from_logits: bool = False,\n alpha: FloatTensorLike = 0.25,\n gamma: FloatTensorLike = 2.0,\n reduction: str = tf.keras.losses.Reduction.NONE,\n name: str = \"sigmoid_focal_crossentropy\",\n ):\n super().__init__(\n sigmoid_focal_crossentropy,\n name=name,\n reduction=reduction,\n from_logits=from_logits,\n alpha=alpha,\n gamma=gamma,\n )\n\n\[email protected]_keras_serializable(package=\"Addons\")\[email protected]\ndef sigmoid_focal_crossentropy(\n y_true: TensorLike,\n y_pred: TensorLike,\n alpha: FloatTensorLike = 0.25,\n gamma: FloatTensorLike = 2.0,\n from_logits: bool = False,\n) -> tf.Tensor:\n \"\"\"\n Args\n y_true: true targets tensor.\n y_pred: predictions tensor.\n alpha: balancing factor.\n gamma: modulating factor.\n\n Returns:\n Weighted loss float `Tensor`. If `reduction` is `NONE`,this has the\n same shape as `y_true`; otherwise, it is scalar.\n \"\"\"\n if gamma and gamma < 0:\n raise ValueError(\"Value of gamma should be greater than or equal to zero\")\n\n y_pred = tf.convert_to_tensor(y_pred)\n y_true = tf.convert_to_tensor(y_true, dtype=y_pred.dtype)\n\n # Get the cross_entropy for each entry\n ce = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)\n\n # If logits are provided then convert the predictions into probabilities\n if from_logits:\n pred_prob = tf.sigmoid(y_pred)\n else:\n pred_prob = y_pred\n\n p_t = (y_true * pred_prob) + ((1 - y_true) * (1 - pred_prob))\n alpha_factor = 1.0\n modulating_factor = 1.0\n\n if alpha:\n alpha = tf.convert_to_tensor(alpha, dtype=K.floatx())\n alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)\n\n if gamma:\n gamma = tf.convert_to_tensor(gamma, dtype=K.floatx())\n modulating_factor = tf.pow((1.0 - p_t), gamma)\n\n # compute the final loss and return\n return tf.reduce_sum(alpha_factor * modulating_factor * ce, axis=-1)\n", "path": "tensorflow_addons/losses/focal_loss.py"}]}
| 1,871 | 133 |
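As a quick cross-check of the focal-loss record above: the example loss values quoted in its docstring can be reproduced with nothing but the standard library, applying the same `alpha_factor * (1 - p_t)**gamma * CE` formula the file implements. This is an illustrative sketch added for clarity, not part of the dataset record.

```python
# Minimal sketch reproducing the sigmoid focal crossentropy values quoted in the
# record above (alpha=0.25, gamma=2.0), using only the Python standard library.
import math

def sigmoid_focal_ce(y_true, y_pred, alpha=0.25, gamma=2.0):
    # Binary cross-entropy for a single probability prediction.
    ce = -(y_true * math.log(y_pred) + (1 - y_true) * math.log(1 - y_pred))
    p_t = y_true * y_pred + (1 - y_true) * (1 - y_pred)
    alpha_factor = y_true * alpha + (1 - y_true) * (1 - alpha)
    modulating_factor = (1.0 - p_t) ** gamma
    return alpha_factor * modulating_factor * ce

for y, p in [(1.0, 0.97), (1.0, 0.91), (0.0, 0.03)]:
    print(sigmoid_focal_ce(y, p))  # ~6.85e-06, ~1.91e-04, ~2.06e-05, matching the docstring
```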
gh_patches_debug_30444
|
rasdani/github-patches
|
git_diff
|
dask__dask-618
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add Base.to_graphviz
Add a function that returns a `graphviz` instance created from the dask graph, for the reasons below:
- When using IPython, `.visualize` outputs an unnecessary image file
- Sometimes we want to modify the graphviz instance directly
--- END ISSUE ---
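To make the issue's motivation concrete, here is a hypothetical usage sketch. The `to_graphviz()` method, its return type, and the `graph_attr` tweak are assumptions drawn from the issue text, not an existing dask API at the time of the report; the change that was actually accepted appears in the patch further down.

```python
# Hypothetical usage sketch only -- `to_graphviz` as requested in the issue.
# In a notebook, one wants the graph object back instead of a PNG file on disk.
import dask.array as da

x = da.ones((15, 15), chunks=(5, 5)).sum(axis=1)
g = x.to_graphviz()              # assumed API: returns a graphviz.Digraph
g.graph_attr['rankdir'] = 'LR'   # tweak the graphviz object directly
g                                # rendered inline by IPython, no file written
```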
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dask/dot.py`
Content:
```
1 from __future__ import absolute_import, division, print_function
2
3 import re
4 from subprocess import check_call, CalledProcessError
5
6 from graphviz import Digraph
7
8 from .core import istask, get_dependencies, ishashable
9
10
11 def task_label(task):
12 """Label for a task on a dot graph.
13
14 Examples
15 --------
16 >>> from operator import add
17 >>> task_label((add, 1, 2))
18 'add'
19 >>> task_label((add, (add, 1, 2), 3))
20 'add(...)'
21 """
22 func = task[0]
23 if hasattr(func, 'funcs'):
24 if len(func.funcs) > 1:
25 return '{0}(...)'.format(funcname(func.funcs[0]))
26 else:
27 head = funcname(func.funcs[0])
28 else:
29 head = funcname(task[0])
30 if any(has_sub_tasks(i) for i in task[1:]):
31 return '{0}(...)'.format(head)
32 else:
33 return head
34
35
36 def has_sub_tasks(task):
37 """Returns True if the task has sub tasks"""
38 if istask(task):
39 return True
40 elif isinstance(task, list):
41 return any(has_sub_tasks(i) for i in task)
42 else:
43 return False
44
45
46 def funcname(func):
47 """Get the name of a function."""
48 while hasattr(func, 'func'):
49 func = func.func
50 return func.__name__
51
52
53 def name(x):
54 try:
55 return str(hash(x))
56 except TypeError:
57 return str(hash(str(x)))
58
59
60 _HASHPAT = re.compile('([0-9a-z]{32})')
61
62
63 def label(x, cache=None):
64 """
65
66 >>> label('x')
67 'x'
68
69 >>> label(('x', 1))
70 "('x', 1)"
71
72 >>> from hashlib import md5
73 >>> x = 'x-%s-hello' % md5(b'1234').hexdigest()
74 >>> x
75 'x-81dc9bdb52d04dc20036dbd8313ed055-hello'
76
77 >>> label(x)
78 'x-#-hello'
79 """
80 s = str(x)
81 m = re.search(_HASHPAT, s)
82 if m is not None:
83 for h in m.groups():
84 if cache is not None:
85 n = cache.get(h, len(cache))
86 label = '#{0}'.format(n)
87 # cache will be overwritten destructively
88 cache[h] = n
89 else:
90 label = '#'
91 s = s.replace(h, label)
92 return s
93
94
95 def to_graphviz(dsk, data_attributes=None, function_attributes=None):
96 if data_attributes is None:
97 data_attributes = {}
98 if function_attributes is None:
99 function_attributes = {}
100
101 g = Digraph(graph_attr={'rankdir': 'BT'})
102
103 seen = set()
104 cache = {}
105
106 for k, v in dsk.items():
107 k_name = name(k)
108 if k_name not in seen:
109 seen.add(k_name)
110 g.node(k_name, label=label(k, cache=cache), shape='box',
111 **data_attributes.get(k, {}))
112
113 if istask(v):
114 func_name = name((k, 'function'))
115 if func_name not in seen:
116 seen.add(func_name)
117 g.node(func_name, label=task_label(v), shape='circle',
118 **function_attributes.get(k, {}))
119 g.edge(func_name, k_name)
120
121 for dep in get_dependencies(dsk, k):
122 dep_name = name(dep)
123 if dep_name not in seen:
124 seen.add(dep_name)
125 g.node(dep_name, label=label(dep, cache=cache), shape='box',
126 **data_attributes.get(dep, {}))
127 g.edge(dep_name, func_name)
128 elif ishashable(v) and v in dsk:
129 g.edge(name(v), k_name)
130 return g
131
132
133 def dot_graph(dsk, filename='mydask', **kwargs):
134 g = to_graphviz(dsk, **kwargs)
135 g.save(filename + '.dot')
136
137 try:
138 check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename), shell=True)
139 check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename), shell=True)
140 except CalledProcessError:
141 raise RuntimeError(
142 "Please install The `dot` utility from graphviz:\n"
143 " Debian: sudo apt-get install graphviz\n"
144 " Mac OSX: brew install graphviz\n"
145 " Windows: http://www.graphviz.org/Download..php") # pragma: no cover
146 try:
147 from IPython.display import Image
148 return Image(filename + '.png')
149 except ImportError:
150 pass
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dask/dot.py b/dask/dot.py
--- a/dask/dot.py
+++ b/dask/dot.py
@@ -6,6 +6,7 @@
from graphviz import Digraph
from .core import istask, get_dependencies, ishashable
+from .compatibility import BytesIO
def task_label(task):
@@ -132,19 +133,35 @@
def dot_graph(dsk, filename='mydask', **kwargs):
g = to_graphviz(dsk, **kwargs)
- g.save(filename + '.dot')
- try:
- check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename), shell=True)
- check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename), shell=True)
- except CalledProcessError:
- raise RuntimeError(
- "Please install The `dot` utility from graphviz:\n"
- " Debian: sudo apt-get install graphviz\n"
- " Mac OSX: brew install graphviz\n"
- " Windows: http://www.graphviz.org/Download..php") # pragma: no cover
- try:
- from IPython.display import Image
- return Image(filename + '.png')
- except ImportError:
- pass
+ if filename is not None:
+ g.save(filename + '.dot')
+
+ try:
+ check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename),
+ shell=True)
+ check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename),
+ shell=True)
+
+ except CalledProcessError:
+ msg = ("Please install The `dot` utility from graphviz:\n"
+ " Debian: sudo apt-get install graphviz\n"
+ " Mac OSX: brew install graphviz\n"
+ " Windows: http://www.graphviz.org/Download..php")
+ raise RuntimeError(msg) # pragma: no cover
+
+ try:
+ from IPython.display import Image
+ return Image(filename + '.png')
+ except ImportError:
+ pass
+
+ else:
+ try:
+ from IPython.display import Image
+ s = BytesIO()
+ s.write(g.pipe(format='png'))
+ s.seek(0)
+ return Image(s.read())
+ except ImportError:
+ pass
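Usage note on the patch above: with `filename=None` the function now skips writing `.dot`/`.pdf`/`.png` files and pipes the PNG bytes into an IPython `Image`. A rough sketch of exercising that path, assuming the `graphviz` Python package, the `dot` binary, and IPython are available:

```python
# Rough sketch of the in-memory path added by the patch (no .dot/.pdf/.png files written).
from operator import add
from dask.dot import dot_graph

dsk = {'x': 1, 'y': (add, 'x', 10), 'z': (add, 'y', 'x')}
img = dot_graph(dsk, filename=None)  # IPython.display.Image, or None if IPython is absent
img
```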
|
{"golden_diff": "diff --git a/dask/dot.py b/dask/dot.py\n--- a/dask/dot.py\n+++ b/dask/dot.py\n@@ -6,6 +6,7 @@\n from graphviz import Digraph\n \n from .core import istask, get_dependencies, ishashable\n+from .compatibility import BytesIO\n \n \n def task_label(task):\n@@ -132,19 +133,35 @@\n \n def dot_graph(dsk, filename='mydask', **kwargs):\n g = to_graphviz(dsk, **kwargs)\n- g.save(filename + '.dot')\n \n- try:\n- check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename), shell=True)\n- check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename), shell=True)\n- except CalledProcessError:\n- raise RuntimeError(\n- \"Please install The `dot` utility from graphviz:\\n\"\n- \" Debian: sudo apt-get install graphviz\\n\"\n- \" Mac OSX: brew install graphviz\\n\"\n- \" Windows: http://www.graphviz.org/Download..php\") # pragma: no cover\n- try:\n- from IPython.display import Image\n- return Image(filename + '.png')\n- except ImportError:\n- pass\n+ if filename is not None:\n+ g.save(filename + '.dot')\n+\n+ try:\n+ check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename),\n+ shell=True)\n+ check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename),\n+ shell=True)\n+\n+ except CalledProcessError:\n+ msg = (\"Please install The `dot` utility from graphviz:\\n\"\n+ \" Debian: sudo apt-get install graphviz\\n\"\n+ \" Mac OSX: brew install graphviz\\n\"\n+ \" Windows: http://www.graphviz.org/Download..php\")\n+ raise RuntimeError(msg) # pragma: no cover\n+\n+ try:\n+ from IPython.display import Image\n+ return Image(filename + '.png')\n+ except ImportError:\n+ pass\n+\n+ else:\n+ try:\n+ from IPython.display import Image\n+ s = BytesIO()\n+ s.write(g.pipe(format='png'))\n+ s.seek(0)\n+ return Image(s.read())\n+ except ImportError:\n+ pass\n", "issue": "Add Base.to_graphviz\nAdd function to return `graphviz` instance created from dask graph for below reasons:\n- When using IPython, `.visualize` outputs unnecessary image file\n- Sometimes we want to modify graphviz instance directly\n\n", "before_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport re\nfrom subprocess import check_call, CalledProcessError\n\nfrom graphviz import Digraph\n\nfrom .core import istask, get_dependencies, ishashable\n\n\ndef task_label(task):\n \"\"\"Label for a task on a dot graph.\n\n Examples\n --------\n >>> from operator import add\n >>> task_label((add, 1, 2))\n 'add'\n >>> task_label((add, (add, 1, 2), 3))\n 'add(...)'\n \"\"\"\n func = task[0]\n if hasattr(func, 'funcs'):\n if len(func.funcs) > 1:\n return '{0}(...)'.format(funcname(func.funcs[0]))\n else:\n head = funcname(func.funcs[0])\n else:\n head = funcname(task[0])\n if any(has_sub_tasks(i) for i in task[1:]):\n return '{0}(...)'.format(head)\n else:\n return head\n\n\ndef has_sub_tasks(task):\n \"\"\"Returns True if the task has sub tasks\"\"\"\n if istask(task):\n return True\n elif isinstance(task, list):\n return any(has_sub_tasks(i) for i in task)\n else:\n return False\n\n\ndef funcname(func):\n \"\"\"Get the name of a function.\"\"\"\n while hasattr(func, 'func'):\n func = func.func\n return func.__name__\n\n\ndef name(x):\n try:\n return str(hash(x))\n except TypeError:\n return str(hash(str(x)))\n\n\n_HASHPAT = re.compile('([0-9a-z]{32})')\n\n\ndef label(x, cache=None):\n \"\"\"\n\n >>> label('x')\n 'x'\n\n >>> label(('x', 1))\n \"('x', 1)\"\n\n >>> from hashlib import md5\n >>> x = 'x-%s-hello' % md5(b'1234').hexdigest()\n >>> x\n 'x-81dc9bdb52d04dc20036dbd8313ed055-hello'\n\n >>> label(x)\n 
'x-#-hello'\n \"\"\"\n s = str(x)\n m = re.search(_HASHPAT, s)\n if m is not None:\n for h in m.groups():\n if cache is not None:\n n = cache.get(h, len(cache))\n label = '#{0}'.format(n)\n # cache will be overwritten destructively\n cache[h] = n\n else:\n label = '#'\n s = s.replace(h, label)\n return s\n\n\ndef to_graphviz(dsk, data_attributes=None, function_attributes=None):\n if data_attributes is None:\n data_attributes = {}\n if function_attributes is None:\n function_attributes = {}\n\n g = Digraph(graph_attr={'rankdir': 'BT'})\n\n seen = set()\n cache = {}\n\n for k, v in dsk.items():\n k_name = name(k)\n if k_name not in seen:\n seen.add(k_name)\n g.node(k_name, label=label(k, cache=cache), shape='box',\n **data_attributes.get(k, {}))\n\n if istask(v):\n func_name = name((k, 'function'))\n if func_name not in seen:\n seen.add(func_name)\n g.node(func_name, label=task_label(v), shape='circle',\n **function_attributes.get(k, {}))\n g.edge(func_name, k_name)\n\n for dep in get_dependencies(dsk, k):\n dep_name = name(dep)\n if dep_name not in seen:\n seen.add(dep_name)\n g.node(dep_name, label=label(dep, cache=cache), shape='box',\n **data_attributes.get(dep, {}))\n g.edge(dep_name, func_name)\n elif ishashable(v) and v in dsk:\n g.edge(name(v), k_name)\n return g\n\n\ndef dot_graph(dsk, filename='mydask', **kwargs):\n g = to_graphviz(dsk, **kwargs)\n g.save(filename + '.dot')\n\n try:\n check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename), shell=True)\n check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename), shell=True)\n except CalledProcessError:\n raise RuntimeError(\n \"Please install The `dot` utility from graphviz:\\n\"\n \" Debian: sudo apt-get install graphviz\\n\"\n \" Mac OSX: brew install graphviz\\n\"\n \" Windows: http://www.graphviz.org/Download..php\") # pragma: no cover\n try:\n from IPython.display import Image\n return Image(filename + '.png')\n except ImportError:\n pass\n", "path": "dask/dot.py"}], "after_files": [{"content": "from __future__ import absolute_import, division, print_function\n\nimport re\nfrom subprocess import check_call, CalledProcessError\n\nfrom graphviz import Digraph\n\nfrom .core import istask, get_dependencies, ishashable\nfrom .compatibility import BytesIO\n\n\ndef task_label(task):\n \"\"\"Label for a task on a dot graph.\n\n Examples\n --------\n >>> from operator import add\n >>> task_label((add, 1, 2))\n 'add'\n >>> task_label((add, (add, 1, 2), 3))\n 'add(...)'\n \"\"\"\n func = task[0]\n if hasattr(func, 'funcs'):\n if len(func.funcs) > 1:\n return '{0}(...)'.format(funcname(func.funcs[0]))\n else:\n head = funcname(func.funcs[0])\n else:\n head = funcname(task[0])\n if any(has_sub_tasks(i) for i in task[1:]):\n return '{0}(...)'.format(head)\n else:\n return head\n\n\ndef has_sub_tasks(task):\n \"\"\"Returns True if the task has sub tasks\"\"\"\n if istask(task):\n return True\n elif isinstance(task, list):\n return any(has_sub_tasks(i) for i in task)\n else:\n return False\n\n\ndef funcname(func):\n \"\"\"Get the name of a function.\"\"\"\n while hasattr(func, 'func'):\n func = func.func\n return func.__name__\n\n\ndef name(x):\n try:\n return str(hash(x))\n except TypeError:\n return str(hash(str(x)))\n\n\n_HASHPAT = re.compile('([0-9a-z]{32})')\n\n\ndef label(x, cache=None):\n \"\"\"\n\n >>> label('x')\n 'x'\n\n >>> label(('x', 1))\n \"('x', 1)\"\n\n >>> from hashlib import md5\n >>> x = 'x-%s-hello' % md5(b'1234').hexdigest()\n >>> x\n 'x-81dc9bdb52d04dc20036dbd8313ed055-hello'\n\n >>> label(x)\n 'x-#-hello'\n \"\"\"\n 
s = str(x)\n m = re.search(_HASHPAT, s)\n if m is not None:\n for h in m.groups():\n if cache is not None:\n n = cache.get(h, len(cache))\n label = '#{0}'.format(n)\n # cache will be overwritten destructively\n cache[h] = n\n else:\n label = '#'\n s = s.replace(h, label)\n return s\n\n\ndef to_graphviz(dsk, data_attributes=None, function_attributes=None):\n if data_attributes is None:\n data_attributes = {}\n if function_attributes is None:\n function_attributes = {}\n\n g = Digraph(graph_attr={'rankdir': 'BT'})\n\n seen = set()\n cache = {}\n\n for k, v in dsk.items():\n k_name = name(k)\n if k_name not in seen:\n seen.add(k_name)\n g.node(k_name, label=label(k, cache=cache), shape='box',\n **data_attributes.get(k, {}))\n\n if istask(v):\n func_name = name((k, 'function'))\n if func_name not in seen:\n seen.add(func_name)\n g.node(func_name, label=task_label(v), shape='circle',\n **function_attributes.get(k, {}))\n g.edge(func_name, k_name)\n\n for dep in get_dependencies(dsk, k):\n dep_name = name(dep)\n if dep_name not in seen:\n seen.add(dep_name)\n g.node(dep_name, label=label(dep, cache=cache), shape='box',\n **data_attributes.get(dep, {}))\n g.edge(dep_name, func_name)\n elif ishashable(v) and v in dsk:\n g.edge(name(v), k_name)\n return g\n\n\ndef dot_graph(dsk, filename='mydask', **kwargs):\n g = to_graphviz(dsk, **kwargs)\n\n if filename is not None:\n g.save(filename + '.dot')\n\n try:\n check_call('dot -Tpdf {0}.dot -o {0}.pdf'.format(filename),\n shell=True)\n check_call('dot -Tpng {0}.dot -o {0}.png'.format(filename),\n shell=True)\n\n except CalledProcessError:\n msg = (\"Please install The `dot` utility from graphviz:\\n\"\n \" Debian: sudo apt-get install graphviz\\n\"\n \" Mac OSX: brew install graphviz\\n\"\n \" Windows: http://www.graphviz.org/Download..php\")\n raise RuntimeError(msg) # pragma: no cover\n\n try:\n from IPython.display import Image\n return Image(filename + '.png')\n except ImportError:\n pass\n\n else:\n try:\n from IPython.display import Image\n s = BytesIO()\n s.write(g.pipe(format='png'))\n s.seek(0)\n return Image(s.read())\n except ImportError:\n pass\n", "path": "dask/dot.py"}]}
| 1,714 | 551 |
gh_patches_debug_5288
|
rasdani/github-patches
|
git_diff
|
great-expectations__great_expectations-1576
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Use cleaner solution for non-truncating division in python 2
Prefer `from __future__ import division` to `1.*x/y`
--- END ISSUE ---
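As a side note, the issue title concerns Python 2 division style; a generic illustration of the stated preference (not code from this repository):

```python
# Generic illustration of the preference stated in the issue (not repository code).
from __future__ import division  # must appear near the top of the module in Python 2

x, y = 1, 4
print(x / y)        # 0.25 with the __future__ import (and always in Python 3)
print(1. * x / y)   # 0.25 as well, but relies on the awkward float-coercion idiom
print(x // y)       # 0 -- explicit floor division when truncation is actually intended
```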
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `great_expectations/data_context/util.py`
Content:
```
1 import copy
2 import importlib
3 import inspect
4 import logging
5 import os
6 import re
7 from collections import OrderedDict
8
9 from great_expectations.data_context.types.base import (
10 DataContextConfig,
11 DataContextConfigSchema,
12 )
13 from great_expectations.exceptions import (
14 MissingConfigVariableError,
15 PluginClassNotFoundError,
16 PluginModuleNotFoundError,
17 )
18 from great_expectations.util import verify_dynamic_loading_support
19
20 logger = logging.getLogger(__name__)
21
22
23 def load_class(class_name, module_name):
24 """Dynamically load a class from strings or raise a helpful error."""
25 try:
26 loaded_module = importlib.import_module(module_name)
27 class_ = getattr(loaded_module, class_name)
28 except ModuleNotFoundError:
29 raise PluginModuleNotFoundError(module_name)
30 except AttributeError:
31 raise PluginClassNotFoundError(module_name=module_name, class_name=class_name)
32 return class_
33
34
35 # TODO: Rename config to constructor_kwargs and config_defaults -> constructor_kwarg_default
36 # TODO: Improve error messages in this method. Since so much of our workflow is config-driven, this will be a *super* important part of DX.
37 def instantiate_class_from_config(config, runtime_environment, config_defaults=None):
38 """Build a GE class from configuration dictionaries."""
39
40 if config_defaults is None:
41 config_defaults = {}
42
43 config = copy.deepcopy(config)
44
45 module_name = config.pop("module_name", None)
46 if module_name is None:
47 try:
48 module_name = config_defaults.pop("module_name")
49 except KeyError:
50 raise KeyError(
51 "Neither config : {} nor config_defaults : {} contains a module_name key.".format(
52 config, config_defaults,
53 )
54 )
55 else:
56 # Pop the value without using it, to avoid sending an unwanted value to the config_class
57 config_defaults.pop("module_name", None)
58
59 verify_dynamic_loading_support(module_name=module_name)
60
61 class_name = config.pop("class_name", None)
62 if class_name is None:
63 logger.warning(
64 "Instantiating class from config without an explicit class_name is dangerous. Consider adding "
65 "an explicit class_name for %s" % config.get("name")
66 )
67 try:
68 class_name = config_defaults.pop("class_name")
69 except KeyError:
70 raise KeyError(
71 "Neither config : {} nor config_defaults : {} contains a class_name key.".format(
72 config, config_defaults,
73 )
74 )
75 else:
76 # Pop the value without using it, to avoid sending an unwanted value to the config_class
77 config_defaults.pop("class_name", None)
78
79 class_ = load_class(class_name=class_name, module_name=module_name)
80
81 config_with_defaults = copy.deepcopy(config_defaults)
82 config_with_defaults.update(config)
83 if runtime_environment is not None:
84 # If there are additional kwargs available in the runtime_environment requested by a
85 # class to be instantiated, provide them
86 argspec = inspect.getfullargspec(class_.__init__)[0][1:]
87
88 missing_args = set(argspec) - set(config_with_defaults.keys())
89 config_with_defaults.update(
90 {
91 missing_arg: runtime_environment[missing_arg]
92 for missing_arg in missing_args
93 if missing_arg in runtime_environment
94 }
95 )
96 # Add the entire runtime_environment as well if it's requested
97 if "runtime_environment" in missing_args:
98 config_with_defaults.update({"runtime_environment": runtime_environment})
99
100 try:
101 class_instance = class_(**config_with_defaults)
102 except TypeError as e:
103 raise TypeError(
104 "Couldn't instantiate class : {} with config : \n\t{}\n \n".format(
105 class_name, format_dict_for_error_message(config_with_defaults)
106 )
107 + str(e)
108 )
109
110 return class_instance
111
112
113 def format_dict_for_error_message(dict_):
114 # TODO : Tidy this up a bit. Indentation isn't fully consistent.
115
116 return "\n\t".join("\t\t".join((str(key), str(dict_[key]))) for key in dict_)
117
118
119 def substitute_config_variable(template_str, config_variables_dict):
120 """
121 This method takes a string, and if it contains a pattern ${SOME_VARIABLE} or $SOME_VARIABLE,
122 returns a string where the pattern is replaced with the value of SOME_VARIABLE,
123 otherwise returns the string unchanged.
124
125 If the environment variable SOME_VARIABLE is set, the method uses its value for substitution.
126 If it is not set, the value of SOME_VARIABLE is looked up in the config variables store (file).
127 If it is not found there, the input string is returned as is.
128
129 :param template_str: a string that might or might not be of the form ${SOME_VARIABLE}
130 or $SOME_VARIABLE
131 :param config_variables_dict: a dictionary of config variables. It is loaded from the
132 config variables store (by default, "uncommitted/config_variables.yml file)
133 :return:
134 """
135 if template_str is None:
136 return template_str
137
138 try:
139 match = re.search(r"\$\{(.*?)\}", template_str) or re.search(
140 r"\$([_a-z][_a-z0-9]*)", template_str
141 )
142 except TypeError:
143 # If the value is not a string (e.g., a boolean), we should return it as is
144 return template_str
145
146 if match:
147 config_variable_value = config_variables_dict.get(match.group(1))
148
149 if config_variable_value:
150 if match.start() == 0 and match.end() == len(template_str):
151 return config_variable_value
152 else:
153 return (
154 template_str[: match.start()]
155 + config_variable_value
156 + template_str[match.end() :]
157 )
158
159 raise MissingConfigVariableError(
160 f"""\n\nUnable to find a match for config substitution variable: `{match.group(1)}`.
161 Please add this missing variable to your `uncommitted/config_variables.yml` file or your environment variables.
162 See https://great-expectations.readthedocs.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets""",
163 missing_config_variable=match.group(1),
164 )
165
166 return template_str
167
168
169 def substitute_all_config_variables(data, replace_variables_dict):
170 """
171 Substitute all config variables of the form ${SOME_VARIABLE} in a dictionary-like
172 config object for their values.
173
174 The method traverses the dictionary recursively.
175
176 :param data:
177 :param replace_variables_dict:
178 :return: a dictionary with all the variables replaced with their values
179 """
180 if isinstance(data, DataContextConfig):
181 data = DataContextConfigSchema().dump(data)
182
183 if isinstance(data, dict) or isinstance(data, OrderedDict):
184 return {
185 k: substitute_all_config_variables(v, replace_variables_dict)
186 for k, v in data.items()
187 }
188 elif isinstance(data, list):
189 return [
190 substitute_all_config_variables(v, replace_variables_dict) for v in data
191 ]
192 return substitute_config_variable(data, replace_variables_dict)
193
194
195 def file_relative_path(dunderfile, relative_path):
196 """
197 This function is useful when one needs to load a file that is
198 relative to the position of the current file. (Such as when
199 you encode a configuration file path in source file and want
200 in runnable in any current working directory)
201
202 It is meant to be used like the following:
203 file_relative_path(__file__, 'path/relative/to/file')
204
205 H/T https://github.com/dagster-io/dagster/blob/8a250e9619a49e8bff8e9aa7435df89c2d2ea039/python_modules/dagster/dagster/utils/__init__.py#L34
206 """
207 return os.path.join(os.path.dirname(dunderfile), relative_path)
208
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/great_expectations/data_context/util.py b/great_expectations/data_context/util.py
--- a/great_expectations/data_context/util.py
+++ b/great_expectations/data_context/util.py
@@ -146,7 +146,7 @@
if match:
config_variable_value = config_variables_dict.get(match.group(1))
- if config_variable_value:
+ if config_variable_value is not None:
if match.start() == 0 and match.end() == len(template_str):
return config_variable_value
else:
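The behavioural difference targeted by the patch above is the usual truthiness pitfall: a config variable that is present but falsy (empty string, `False`, `0`) should still be substituted. A small standalone sketch, independent of the Great Expectations codebase (the variable names are made up):

```python
# Why `if value:` is too strict here: falsy-but-present values were being skipped.
config_variables = {"S3_PREFIX": "", "USE_SSL": False, "BUCKET": "my-bucket"}

for key in ("S3_PREFIX", "USE_SSL", "BUCKET", "MISSING"):
    value = config_variables.get(key)
    old = "substituted" if value else "treated as missing"          # old check
    new = "substituted" if value is not None else "treated as missing"  # patched check
    print(f"{key!r:12} old={old:18} new={new}")
```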
|
{"golden_diff": "diff --git a/great_expectations/data_context/util.py b/great_expectations/data_context/util.py\n--- a/great_expectations/data_context/util.py\n+++ b/great_expectations/data_context/util.py\n@@ -146,7 +146,7 @@\n if match:\n config_variable_value = config_variables_dict.get(match.group(1))\n \n- if config_variable_value:\n+ if config_variable_value is not None:\n if match.start() == 0 and match.end() == len(template_str):\n return config_variable_value\n else:\n", "issue": "Use cleaner solution for non-truncating division in python 2\nPrefer `from __future__ import division` to `1.*x/y`\n", "before_files": [{"content": "import copy\nimport importlib\nimport inspect\nimport logging\nimport os\nimport re\nfrom collections import OrderedDict\n\nfrom great_expectations.data_context.types.base import (\n DataContextConfig,\n DataContextConfigSchema,\n)\nfrom great_expectations.exceptions import (\n MissingConfigVariableError,\n PluginClassNotFoundError,\n PluginModuleNotFoundError,\n)\nfrom great_expectations.util import verify_dynamic_loading_support\n\nlogger = logging.getLogger(__name__)\n\n\ndef load_class(class_name, module_name):\n \"\"\"Dynamically load a class from strings or raise a helpful error.\"\"\"\n try:\n loaded_module = importlib.import_module(module_name)\n class_ = getattr(loaded_module, class_name)\n except ModuleNotFoundError:\n raise PluginModuleNotFoundError(module_name)\n except AttributeError:\n raise PluginClassNotFoundError(module_name=module_name, class_name=class_name)\n return class_\n\n\n# TODO: Rename config to constructor_kwargs and config_defaults -> constructor_kwarg_default\n# TODO: Improve error messages in this method. Since so much of our workflow is config-driven, this will be a *super* important part of DX.\ndef instantiate_class_from_config(config, runtime_environment, config_defaults=None):\n \"\"\"Build a GE class from configuration dictionaries.\"\"\"\n\n if config_defaults is None:\n config_defaults = {}\n\n config = copy.deepcopy(config)\n\n module_name = config.pop(\"module_name\", None)\n if module_name is None:\n try:\n module_name = config_defaults.pop(\"module_name\")\n except KeyError:\n raise KeyError(\n \"Neither config : {} nor config_defaults : {} contains a module_name key.\".format(\n config, config_defaults,\n )\n )\n else:\n # Pop the value without using it, to avoid sending an unwanted value to the config_class\n config_defaults.pop(\"module_name\", None)\n\n verify_dynamic_loading_support(module_name=module_name)\n\n class_name = config.pop(\"class_name\", None)\n if class_name is None:\n logger.warning(\n \"Instantiating class from config without an explicit class_name is dangerous. 
Consider adding \"\n \"an explicit class_name for %s\" % config.get(\"name\")\n )\n try:\n class_name = config_defaults.pop(\"class_name\")\n except KeyError:\n raise KeyError(\n \"Neither config : {} nor config_defaults : {} contains a class_name key.\".format(\n config, config_defaults,\n )\n )\n else:\n # Pop the value without using it, to avoid sending an unwanted value to the config_class\n config_defaults.pop(\"class_name\", None)\n\n class_ = load_class(class_name=class_name, module_name=module_name)\n\n config_with_defaults = copy.deepcopy(config_defaults)\n config_with_defaults.update(config)\n if runtime_environment is not None:\n # If there are additional kwargs available in the runtime_environment requested by a\n # class to be instantiated, provide them\n argspec = inspect.getfullargspec(class_.__init__)[0][1:]\n\n missing_args = set(argspec) - set(config_with_defaults.keys())\n config_with_defaults.update(\n {\n missing_arg: runtime_environment[missing_arg]\n for missing_arg in missing_args\n if missing_arg in runtime_environment\n }\n )\n # Add the entire runtime_environment as well if it's requested\n if \"runtime_environment\" in missing_args:\n config_with_defaults.update({\"runtime_environment\": runtime_environment})\n\n try:\n class_instance = class_(**config_with_defaults)\n except TypeError as e:\n raise TypeError(\n \"Couldn't instantiate class : {} with config : \\n\\t{}\\n \\n\".format(\n class_name, format_dict_for_error_message(config_with_defaults)\n )\n + str(e)\n )\n\n return class_instance\n\n\ndef format_dict_for_error_message(dict_):\n # TODO : Tidy this up a bit. Indentation isn't fully consistent.\n\n return \"\\n\\t\".join(\"\\t\\t\".join((str(key), str(dict_[key]))) for key in dict_)\n\n\ndef substitute_config_variable(template_str, config_variables_dict):\n \"\"\"\n This method takes a string, and if it contains a pattern ${SOME_VARIABLE} or $SOME_VARIABLE,\n returns a string where the pattern is replaced with the value of SOME_VARIABLE,\n otherwise returns the string unchanged.\n\n If the environment variable SOME_VARIABLE is set, the method uses its value for substitution.\n If it is not set, the value of SOME_VARIABLE is looked up in the config variables store (file).\n If it is not found there, the input string is returned as is.\n\n :param template_str: a string that might or might not be of the form ${SOME_VARIABLE}\n or $SOME_VARIABLE\n :param config_variables_dict: a dictionary of config variables. 
It is loaded from the\n config variables store (by default, \"uncommitted/config_variables.yml file)\n :return:\n \"\"\"\n if template_str is None:\n return template_str\n\n try:\n match = re.search(r\"\\$\\{(.*?)\\}\", template_str) or re.search(\n r\"\\$([_a-z][_a-z0-9]*)\", template_str\n )\n except TypeError:\n # If the value is not a string (e.g., a boolean), we should return it as is\n return template_str\n\n if match:\n config_variable_value = config_variables_dict.get(match.group(1))\n\n if config_variable_value:\n if match.start() == 0 and match.end() == len(template_str):\n return config_variable_value\n else:\n return (\n template_str[: match.start()]\n + config_variable_value\n + template_str[match.end() :]\n )\n\n raise MissingConfigVariableError(\n f\"\"\"\\n\\nUnable to find a match for config substitution variable: `{match.group(1)}`.\nPlease add this missing variable to your `uncommitted/config_variables.yml` file or your environment variables.\nSee https://great-expectations.readthedocs.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets\"\"\",\n missing_config_variable=match.group(1),\n )\n\n return template_str\n\n\ndef substitute_all_config_variables(data, replace_variables_dict):\n \"\"\"\n Substitute all config variables of the form ${SOME_VARIABLE} in a dictionary-like\n config object for their values.\n\n The method traverses the dictionary recursively.\n\n :param data:\n :param replace_variables_dict:\n :return: a dictionary with all the variables replaced with their values\n \"\"\"\n if isinstance(data, DataContextConfig):\n data = DataContextConfigSchema().dump(data)\n\n if isinstance(data, dict) or isinstance(data, OrderedDict):\n return {\n k: substitute_all_config_variables(v, replace_variables_dict)\n for k, v in data.items()\n }\n elif isinstance(data, list):\n return [\n substitute_all_config_variables(v, replace_variables_dict) for v in data\n ]\n return substitute_config_variable(data, replace_variables_dict)\n\n\ndef file_relative_path(dunderfile, relative_path):\n \"\"\"\n This function is useful when one needs to load a file that is\n relative to the position of the current file. 
(Such as when\n you encode a configuration file path in source file and want\n in runnable in any current working directory)\n\n It is meant to be used like the following:\n file_relative_path(__file__, 'path/relative/to/file')\n\n H/T https://github.com/dagster-io/dagster/blob/8a250e9619a49e8bff8e9aa7435df89c2d2ea039/python_modules/dagster/dagster/utils/__init__.py#L34\n \"\"\"\n return os.path.join(os.path.dirname(dunderfile), relative_path)\n", "path": "great_expectations/data_context/util.py"}], "after_files": [{"content": "import copy\nimport importlib\nimport inspect\nimport logging\nimport os\nimport re\nfrom collections import OrderedDict\n\nfrom great_expectations.data_context.types.base import (\n DataContextConfig,\n DataContextConfigSchema,\n)\nfrom great_expectations.exceptions import (\n MissingConfigVariableError,\n PluginClassNotFoundError,\n PluginModuleNotFoundError,\n)\nfrom great_expectations.util import verify_dynamic_loading_support\n\nlogger = logging.getLogger(__name__)\n\n\ndef load_class(class_name, module_name):\n \"\"\"Dynamically load a class from strings or raise a helpful error.\"\"\"\n try:\n loaded_module = importlib.import_module(module_name)\n class_ = getattr(loaded_module, class_name)\n except ModuleNotFoundError:\n raise PluginModuleNotFoundError(module_name)\n except AttributeError:\n raise PluginClassNotFoundError(module_name=module_name, class_name=class_name)\n return class_\n\n\n# TODO: Rename config to constructor_kwargs and config_defaults -> constructor_kwarg_default\n# TODO: Improve error messages in this method. Since so much of our workflow is config-driven, this will be a *super* important part of DX.\ndef instantiate_class_from_config(config, runtime_environment, config_defaults=None):\n \"\"\"Build a GE class from configuration dictionaries.\"\"\"\n\n if config_defaults is None:\n config_defaults = {}\n\n config = copy.deepcopy(config)\n\n module_name = config.pop(\"module_name\", None)\n if module_name is None:\n try:\n module_name = config_defaults.pop(\"module_name\")\n except KeyError:\n raise KeyError(\n \"Neither config : {} nor config_defaults : {} contains a module_name key.\".format(\n config, config_defaults,\n )\n )\n else:\n # Pop the value without using it, to avoid sending an unwanted value to the config_class\n config_defaults.pop(\"module_name\", None)\n\n verify_dynamic_loading_support(module_name=module_name)\n\n class_name = config.pop(\"class_name\", None)\n if class_name is None:\n logger.warning(\n \"Instantiating class from config without an explicit class_name is dangerous. 
Consider adding \"\n \"an explicit class_name for %s\" % config.get(\"name\")\n )\n try:\n class_name = config_defaults.pop(\"class_name\")\n except KeyError:\n raise KeyError(\n \"Neither config : {} nor config_defaults : {} contains a class_name key.\".format(\n config, config_defaults,\n )\n )\n else:\n # Pop the value without using it, to avoid sending an unwanted value to the config_class\n config_defaults.pop(\"class_name\", None)\n\n class_ = load_class(class_name=class_name, module_name=module_name)\n\n config_with_defaults = copy.deepcopy(config_defaults)\n config_with_defaults.update(config)\n if runtime_environment is not None:\n # If there are additional kwargs available in the runtime_environment requested by a\n # class to be instantiated, provide them\n argspec = inspect.getfullargspec(class_.__init__)[0][1:]\n\n missing_args = set(argspec) - set(config_with_defaults.keys())\n config_with_defaults.update(\n {\n missing_arg: runtime_environment[missing_arg]\n for missing_arg in missing_args\n if missing_arg in runtime_environment\n }\n )\n # Add the entire runtime_environment as well if it's requested\n if \"runtime_environment\" in missing_args:\n config_with_defaults.update({\"runtime_environment\": runtime_environment})\n\n try:\n class_instance = class_(**config_with_defaults)\n except TypeError as e:\n raise TypeError(\n \"Couldn't instantiate class : {} with config : \\n\\t{}\\n \\n\".format(\n class_name, format_dict_for_error_message(config_with_defaults)\n )\n + str(e)\n )\n\n return class_instance\n\n\ndef format_dict_for_error_message(dict_):\n # TODO : Tidy this up a bit. Indentation isn't fully consistent.\n\n return \"\\n\\t\".join(\"\\t\\t\".join((str(key), str(dict_[key]))) for key in dict_)\n\n\ndef substitute_config_variable(template_str, config_variables_dict):\n \"\"\"\n This method takes a string, and if it contains a pattern ${SOME_VARIABLE} or $SOME_VARIABLE,\n returns a string where the pattern is replaced with the value of SOME_VARIABLE,\n otherwise returns the string unchanged.\n\n If the environment variable SOME_VARIABLE is set, the method uses its value for substitution.\n If it is not set, the value of SOME_VARIABLE is looked up in the config variables store (file).\n If it is not found there, the input string is returned as is.\n\n :param template_str: a string that might or might not be of the form ${SOME_VARIABLE}\n or $SOME_VARIABLE\n :param config_variables_dict: a dictionary of config variables. 
It is loaded from the\n config variables store (by default, \"uncommitted/config_variables.yml file)\n :return:\n \"\"\"\n if template_str is None:\n return template_str\n\n try:\n match = re.search(r\"\\$\\{(.*?)\\}\", template_str) or re.search(\n r\"\\$([_a-z][_a-z0-9]*)\", template_str\n )\n except TypeError:\n # If the value is not a string (e.g., a boolean), we should return it as is\n return template_str\n\n if match:\n config_variable_value = config_variables_dict.get(match.group(1))\n\n if config_variable_value is not None:\n if match.start() == 0 and match.end() == len(template_str):\n return config_variable_value\n else:\n return (\n template_str[: match.start()]\n + config_variable_value\n + template_str[match.end() :]\n )\n\n raise MissingConfigVariableError(\n f\"\"\"\\n\\nUnable to find a match for config substitution variable: `{match.group(1)}`.\nPlease add this missing variable to your `uncommitted/config_variables.yml` file or your environment variables.\nSee https://great-expectations.readthedocs.io/en/latest/reference/data_context_reference.html#managing-environment-and-secrets\"\"\",\n missing_config_variable=match.group(1),\n )\n\n return template_str\n\n\ndef substitute_all_config_variables(data, replace_variables_dict):\n \"\"\"\n Substitute all config variables of the form ${SOME_VARIABLE} in a dictionary-like\n config object for their values.\n\n The method traverses the dictionary recursively.\n\n :param data:\n :param replace_variables_dict:\n :return: a dictionary with all the variables replaced with their values\n \"\"\"\n if isinstance(data, DataContextConfig):\n data = DataContextConfigSchema().dump(data)\n\n if isinstance(data, dict) or isinstance(data, OrderedDict):\n return {\n k: substitute_all_config_variables(v, replace_variables_dict)\n for k, v in data.items()\n }\n elif isinstance(data, list):\n return [\n substitute_all_config_variables(v, replace_variables_dict) for v in data\n ]\n return substitute_config_variable(data, replace_variables_dict)\n\n\ndef file_relative_path(dunderfile, relative_path):\n \"\"\"\n This function is useful when one needs to load a file that is\n relative to the position of the current file. (Such as when\n you encode a configuration file path in source file and want\n in runnable in any current working directory)\n\n It is meant to be used like the following:\n file_relative_path(__file__, 'path/relative/to/file')\n\n H/T https://github.com/dagster-io/dagster/blob/8a250e9619a49e8bff8e9aa7435df89c2d2ea039/python_modules/dagster/dagster/utils/__init__.py#L34\n \"\"\"\n return os.path.join(os.path.dirname(dunderfile), relative_path)\n", "path": "great_expectations/data_context/util.py"}]}
| 2,469 | 120 |
gh_patches_debug_5012
|
rasdani/github-patches
|
git_diff
|
pretalx__pretalx-338
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unable to add questions
The following link doesn't do anything: `https://[Domain]/orga/event/[CFP_name]/cfp/questions/new`
## Expected Behavior
Being able to create a question
## Current Behavior
Unable to create a question
## Possible Solution
Fix the edit button.
## Steps to Reproduce (for bugs)
1. Create a new CFP
2. Go to `https://[Domain]/orga/event/[CFP_name]/cfp/questions/new`
3. Click on the edit button (the pen)
## Context
Creating a CFP
## Your Environment
* Version used: 0.4
* Environment name and version: Firefox 58.0.1 & Chromium 64.0.3282.119
* Operating System and version: Ubuntu 17.10 / Server: Ubuntu 16.04 with python 3.6
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pretalx/orga/views/cfp.py`
Content:
```
1 from csp.decorators import csp_update
2 from django.contrib import messages
3 from django.db import transaction
4 from django.db.models.deletion import ProtectedError
5 from django.forms.models import inlineformset_factory
6 from django.shortcuts import redirect
7 from django.utils.decorators import method_decorator
8 from django.utils.functional import cached_property
9 from django.utils.translation import ugettext_lazy as _
10 from django.views.generic import ListView, TemplateView, UpdateView, View
11
12 from pretalx.common.forms import I18nFormSet
13 from pretalx.common.mixins.views import ActionFromUrl, PermissionRequired
14 from pretalx.common.views import CreateOrUpdateView
15 from pretalx.orga.forms import CfPForm, QuestionForm, SubmissionTypeForm
16 from pretalx.orga.forms.cfp import AnswerOptionForm, CfPSettingsForm
17 from pretalx.submission.models import (
18 AnswerOption, CfP, Question, SubmissionType,
19 )
20
21
22 class CfPTextDetail(PermissionRequired, ActionFromUrl, UpdateView):
23 form_class = CfPForm
24 model = CfP
25 template_name = 'orga/cfp/text.html'
26 permission_required = 'orga.edit_cfp'
27 write_permission_required = 'orga.edit_cfp'
28
29 def get_context_data(self, *args, **kwargs):
30 ctx = super().get_context_data(*args, **kwargs)
31 ctx['sform'] = self.sform
32 return ctx
33
34 @cached_property
35 def sform(self):
36 return CfPSettingsForm(
37 read_only=(self._action == 'view'),
38 locales=self.request.event.locales,
39 obj=self.request.event,
40 attribute_name='settings',
41 data=self.request.POST if self.request.method == "POST" else None,
42 prefix='settings'
43 )
44
45 def get_object(self):
46 return self.request.event.cfp
47
48 def get_success_url(self) -> str:
49 return self.get_object().urls.text
50
51 def form_valid(self, form):
52 if not self.sform.is_valid():
53 messages.error(self.request, _('We had trouble saving your input.'))
54 return self.form_invalid(form)
55 messages.success(self.request, 'The CfP update has been saved.')
56 form.instance.event = self.request.event
57 ret = super().form_valid(form)
58 if form.has_changed():
59 form.instance.log_action('pretalx.cfp.update', person=self.request.user, orga=True)
60 self.sform.save()
61 return ret
62
63
64 class CfPQuestionList(PermissionRequired, TemplateView):
65 template_name = 'orga/cfp/question_view.html'
66 permission_required = 'orga.view_question'
67
68 def get_permission_object(self):
69 return self.request.event
70
71 def get_context_data(self, *args, **kwargs):
72 ctx = super().get_context_data(*args, **kwargs)
73 ctx['speaker_questions'] = Question.all_objects.filter(event=self.request.event, target='speaker')
74 ctx['submission_questions'] = Question.all_objects.filter(event=self.request.event, target='submission')
75 return ctx
76
77
78 @method_decorator(csp_update(SCRIPT_SRC="'self' 'unsafe-inline'"), name='dispatch')
79 class CfPQuestionDetail(PermissionRequired, ActionFromUrl, CreateOrUpdateView):
80 model = Question
81 form_class = QuestionForm
82 permission_required = 'orga.edit_question'
83 write_permission_required = 'orga.edit_question'
84
85 def get_template_names(self):
86 if self.request.path.lstrip('/').endswith('edit'):
87 return 'orga/cfp/question_form.html'
88 return 'orga/cfp/question_detail.html'
89
90 def get_permission_object(self):
91 return self.get_object() or self.request.event
92
93 def get_object(self) -> Question:
94 return Question.all_objects.filter(event=self.request.event, pk=self.kwargs.get('pk')).first()
95
96 @cached_property
97 def formset(self):
98 formset_class = inlineformset_factory(
99 Question, AnswerOption, form=AnswerOptionForm, formset=I18nFormSet,
100 can_delete=True, extra=0,
101 )
102 return formset_class(
103 self.request.POST if self.request.method == 'POST' else None,
104 queryset=AnswerOption.objects.filter(question=self.get_object()) if self.get_object() else AnswerOption.objects.none(),
105 event=self.request.event
106 )
107
108 def save_formset(self, obj):
109 if self.formset.is_valid():
110 for form in self.formset.initial_forms:
111 if form in self.formset.deleted_forms:
112 if not form.instance.pk:
113 continue
114 obj.log_action(
115 'pretalx.question.option.delete', person=self.request.user, orga=True, data={
116 'id': form.instance.pk
117 }
118 )
119 form.instance.delete()
120 form.instance.pk = None
121 elif form.has_changed():
122 form.instance.question = obj
123 form.save()
124 change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}
125 change_data['id'] = form.instance.pk
126 obj.log_action(
127 'pretalx.question.option.update',
128 person=self.request.user, orga=True, data=change_data
129 )
130
131 for form in self.formset.extra_forms:
132 if not form.has_changed():
133 continue
134 if self.formset._should_delete_form(form):
135 continue
136 form.instance.question = obj
137 form.save()
138 change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}
139 change_data['id'] = form.instance.pk
140 obj.log_action(
141 'pretalx.question.option.create',
142 person=self.request.user, orga=True, data=change_data
143 )
144
145 return True
146 return False
147
148 def get_context_data(self, *args, **kwargs):
149 ctx = super().get_context_data(*args, **kwargs)
150 ctx['formset'] = self.formset
151 ctx['question'] = self.get_object()
152 return ctx
153
154 def get_form_kwargs(self, *args, **kwargs):
155 kwargs = super().get_form_kwargs(*args, **kwargs)
156 if not self.get_object():
157 initial = kwargs['initial'] or dict()
158 initial['target'] = self.request.GET.get('type')
159 kwargs['initial'] = initial
160 return kwargs
161
162 def get_success_url(self) -> str:
163 obj = self.get_object() or self.instance
164 return obj.urls.base
165
166 @transaction.atomic
167 def form_valid(self, form):
168 form.instance.event = self.request.event
169 self.instance = form.instance
170 ret = super().form_valid(form)
171 if form.cleaned_data.get('variant') in ('choices', 'multiple_choice'):
172 result = self.save_formset(self.instance)
173 if not result:
174 return self.get(self.request, *self.args, **self.kwargs)
175 if form.has_changed():
176 action = 'pretalx.question.' + ('update' if self.object else 'create')
177 form.instance.log_action(action, person=self.request.user, orga=True)
178 messages.success(self.request, 'The question has been saved.')
179 return ret
180
181
182 class CfPQuestionDelete(PermissionRequired, View):
183 permission_required = 'orga.remove_question'
184
185 def get_object(self) -> Question:
186 return Question.all_objects.get(event=self.request.event, pk=self.kwargs.get('pk'))
187
188 def dispatch(self, request, *args, **kwargs):
189 super().dispatch(request, *args, **kwargs)
190 question = self.get_object()
191
192 try:
193 with transaction.atomic():
194 question.options.all().delete()
195 question.delete()
196 question.log_action('pretalx.question.delete', person=self.request.user, orga=True)
197 messages.success(request, _('The question has been deleted.'))
198 except ProtectedError:
199 question.active = False
200 question.save()
201 messages.error(request, _('You cannot delete a question that has already been answered. We have deactivated the question instead.'))
202 return redirect(self.request.event.cfp.urls.questions)
203
204
205 class CfPQuestionToggle(PermissionRequired, View):
206 permission_required = 'orga.edit_question'
207
208 def get_object(self) -> Question:
209 return Question.all_objects.filter(event=self.request.event, pk=self.kwargs.get('pk')).first()
210
211 def dispatch(self, request, *args, **kwargs):
212 super().dispatch(request, *args, **kwargs)
213 question = self.get_object()
214
215 question.active = not question.active
216 question.save(update_fields=['active'])
217 return redirect(question.urls.base)
218
219
220 class SubmissionTypeList(PermissionRequired, ListView):
221 template_name = 'orga/cfp/submission_type_view.html'
222 context_object_name = 'types'
223 permission_required = 'orga.view_submission_type'
224
225 def get_permission_object(self):
226 return self.request.event
227
228 def get_queryset(self):
229 return self.request.event.submission_types.all()
230
231
232 class SubmissionTypeDetail(PermissionRequired, ActionFromUrl, CreateOrUpdateView):
233 model = SubmissionType
234 form_class = SubmissionTypeForm
235 template_name = 'orga/cfp/submission_type_form.html'
236 permission_required = 'orga.edit_submission_type'
237 write_permission_required = 'orga.edit_submission_type'
238
239 def get_success_url(self) -> str:
240 return self.request.event.cfp.urls.types
241
242 def get_object(self):
243 return self.request.event.submission_types.filter(pk=self.kwargs.get('pk')).first()
244
245 def get_permission_object(self):
246 return self.get_object() or self.request.event
247
248 def form_valid(self, form):
249 messages.success(self.request, 'The Submission Type has been saved.')
250 form.instance.event = self.request.event
251 ret = super().form_valid(form)
252 if form.has_changed():
253 action = 'pretalx.submission_type.' + ('update' if self.object else 'create')
254 form.instance.log_action(action, person=self.request.user, orga=True)
255 return ret
256
257
258 class SubmissionTypeDefault(PermissionRequired, View):
259 permission_required = 'orga.edit_submission_type'
260
261 def get_object(self):
262 return self.request.event.submission_types.get(pk=self.kwargs.get('pk'))
263
264 def dispatch(self, request, *args, **kwargs):
265 super().dispatch(request, *args, **kwargs)
266 submission_type = self.get_object()
267 self.request.event.cfp.default_type = submission_type
268 self.request.event.cfp.save(update_fields=['default_type'])
269 submission_type.log_action('pretalx.submission_type.make_default', person=self.request.user, orga=True)
270 messages.success(request, _('The Submission Type has been made default.'))
271 return redirect(self.request.event.cfp.urls.types)
272
273
274 class SubmissionTypeDelete(PermissionRequired, View):
275 permission_required = 'orga.remove_submission_type'
276
277 def get_object(self):
278 return self.request.event.submission_types.get(pk=self.kwargs.get('pk'))
279
280 def dispatch(self, request, *args, **kwargs):
281 super().dispatch(request, *args, **kwargs)
282 submission_type = self.get_object()
283
284 if request.event.submission_types.count() == 1:
285 messages.error(request, _('You cannot delete the only submission type. Try creating another one first!'))
286 elif request.event.cfp.default_type == submission_type:
287 messages.error(request, _('You cannot delete the default submission type. Make another type default first!'))
288 else:
289 try:
290 submission_type.delete()
291 request.event.log_action('pretalx.submission_type.delete', person=self.request.user, orga=True)
292 messages.success(request, _('The Submission Type has been deleted.'))
293 except ProtectedError: # TODO: show which/how many submissions are concerned
294 messages.error(request, _('This Submission Type is in use in a submission and cannot be deleted.'))
295 return redirect(self.request.event.cfp.urls.types)
296
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pretalx/orga/views/cfp.py b/src/pretalx/orga/views/cfp.py
--- a/src/pretalx/orga/views/cfp.py
+++ b/src/pretalx/orga/views/cfp.py
@@ -83,7 +83,8 @@
write_permission_required = 'orga.edit_question'
def get_template_names(self):
- if self.request.path.lstrip('/').endswith('edit'):
+ action = self.request.path.lstrip('/').rpartition('/')[2]
+ if action in ('edit', 'new'):
return 'orga/cfp/question_form.html'
return 'orga/cfp/question_detail.html'
|
{"golden_diff": "diff --git a/src/pretalx/orga/views/cfp.py b/src/pretalx/orga/views/cfp.py\n--- a/src/pretalx/orga/views/cfp.py\n+++ b/src/pretalx/orga/views/cfp.py\n@@ -83,7 +83,8 @@\n write_permission_required = 'orga.edit_question'\n \n def get_template_names(self):\n- if self.request.path.lstrip('/').endswith('edit'):\n+ action = self.request.path.lstrip('/').rpartition('/')[2]\n+ if action in ('edit', 'new'):\n return 'orga/cfp/question_form.html'\n return 'orga/cfp/question_detail.html'\n", "issue": "Unable to add questions\nThe following link doesn't do anything: `https://[Domain]/orga/event/[CFP_name]/cfp/questions/new`\r\n\r\n## Expected Behavior\r\n\r\nBeing be to create a question\r\n\r\n## Current Behavior\r\n\r\nUnable to create a question\r\n\r\n## Possible Solution\r\n\r\nFix the edit button.\r\n\r\n## Steps to Reproduce (for bugs)\r\n\r\n1. Create a new CFP\r\n2. Go on `https://[Domain]/orga/event/[CFP_name]/cfp/questions/new`\r\n3. Click on the edit button (the pen)\r\n\r\n## Context\r\n\r\nCreating a CFP\r\n\r\n## Your Environment\r\n* Version used: 0.4\r\n* Environment name and version: Firefox 58.0.1 & Chromium 64.0.3282.119\r\n* Operating System and version: Ubuntu 17.10 / Server: Ubuntu 16.04 with python 3.6\n", "before_files": [{"content": "from csp.decorators import csp_update\nfrom django.contrib import messages\nfrom django.db import transaction\nfrom django.db.models.deletion import ProtectedError\nfrom django.forms.models import inlineformset_factory\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import ListView, TemplateView, UpdateView, View\n\nfrom pretalx.common.forms import I18nFormSet\nfrom pretalx.common.mixins.views import ActionFromUrl, PermissionRequired\nfrom pretalx.common.views import CreateOrUpdateView\nfrom pretalx.orga.forms import CfPForm, QuestionForm, SubmissionTypeForm\nfrom pretalx.orga.forms.cfp import AnswerOptionForm, CfPSettingsForm\nfrom pretalx.submission.models import (\n AnswerOption, CfP, Question, SubmissionType,\n)\n\n\nclass CfPTextDetail(PermissionRequired, ActionFromUrl, UpdateView):\n form_class = CfPForm\n model = CfP\n template_name = 'orga/cfp/text.html'\n permission_required = 'orga.edit_cfp'\n write_permission_required = 'orga.edit_cfp'\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['sform'] = self.sform\n return ctx\n\n @cached_property\n def sform(self):\n return CfPSettingsForm(\n read_only=(self._action == 'view'),\n locales=self.request.event.locales,\n obj=self.request.event,\n attribute_name='settings',\n data=self.request.POST if self.request.method == \"POST\" else None,\n prefix='settings'\n )\n\n def get_object(self):\n return self.request.event.cfp\n\n def get_success_url(self) -> str:\n return self.get_object().urls.text\n\n def form_valid(self, form):\n if not self.sform.is_valid():\n messages.error(self.request, _('We had trouble saving your input.'))\n return self.form_invalid(form)\n messages.success(self.request, 'The CfP update has been saved.')\n form.instance.event = self.request.event\n ret = super().form_valid(form)\n if form.has_changed():\n form.instance.log_action('pretalx.cfp.update', person=self.request.user, orga=True)\n self.sform.save()\n return ret\n\n\nclass CfPQuestionList(PermissionRequired, TemplateView):\n template_name = 
'orga/cfp/question_view.html'\n permission_required = 'orga.view_question'\n\n def get_permission_object(self):\n return self.request.event\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['speaker_questions'] = Question.all_objects.filter(event=self.request.event, target='speaker')\n ctx['submission_questions'] = Question.all_objects.filter(event=self.request.event, target='submission')\n return ctx\n\n\n@method_decorator(csp_update(SCRIPT_SRC=\"'self' 'unsafe-inline'\"), name='dispatch')\nclass CfPQuestionDetail(PermissionRequired, ActionFromUrl, CreateOrUpdateView):\n model = Question\n form_class = QuestionForm\n permission_required = 'orga.edit_question'\n write_permission_required = 'orga.edit_question'\n\n def get_template_names(self):\n if self.request.path.lstrip('/').endswith('edit'):\n return 'orga/cfp/question_form.html'\n return 'orga/cfp/question_detail.html'\n\n def get_permission_object(self):\n return self.get_object() or self.request.event\n\n def get_object(self) -> Question:\n return Question.all_objects.filter(event=self.request.event, pk=self.kwargs.get('pk')).first()\n\n @cached_property\n def formset(self):\n formset_class = inlineformset_factory(\n Question, AnswerOption, form=AnswerOptionForm, formset=I18nFormSet,\n can_delete=True, extra=0,\n )\n return formset_class(\n self.request.POST if self.request.method == 'POST' else None,\n queryset=AnswerOption.objects.filter(question=self.get_object()) if self.get_object() else AnswerOption.objects.none(),\n event=self.request.event\n )\n\n def save_formset(self, obj):\n if self.formset.is_valid():\n for form in self.formset.initial_forms:\n if form in self.formset.deleted_forms:\n if not form.instance.pk:\n continue\n obj.log_action(\n 'pretalx.question.option.delete', person=self.request.user, orga=True, data={\n 'id': form.instance.pk\n }\n )\n form.instance.delete()\n form.instance.pk = None\n elif form.has_changed():\n form.instance.question = obj\n form.save()\n change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}\n change_data['id'] = form.instance.pk\n obj.log_action(\n 'pretalx.question.option.update',\n person=self.request.user, orga=True, data=change_data\n )\n\n for form in self.formset.extra_forms:\n if not form.has_changed():\n continue\n if self.formset._should_delete_form(form):\n continue\n form.instance.question = obj\n form.save()\n change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}\n change_data['id'] = form.instance.pk\n obj.log_action(\n 'pretalx.question.option.create',\n person=self.request.user, orga=True, data=change_data\n )\n\n return True\n return False\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['formset'] = self.formset\n ctx['question'] = self.get_object()\n return ctx\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super().get_form_kwargs(*args, **kwargs)\n if not self.get_object():\n initial = kwargs['initial'] or dict()\n initial['target'] = self.request.GET.get('type')\n kwargs['initial'] = initial\n return kwargs\n\n def get_success_url(self) -> str:\n obj = self.get_object() or self.instance\n return obj.urls.base\n\n @transaction.atomic\n def form_valid(self, form):\n form.instance.event = self.request.event\n self.instance = form.instance\n ret = super().form_valid(form)\n if form.cleaned_data.get('variant') in ('choices', 'multiple_choice'):\n result = self.save_formset(self.instance)\n if not result:\n return 
self.get(self.request, *self.args, **self.kwargs)\n if form.has_changed():\n action = 'pretalx.question.' + ('update' if self.object else 'create')\n form.instance.log_action(action, person=self.request.user, orga=True)\n messages.success(self.request, 'The question has been saved.')\n return ret\n\n\nclass CfPQuestionDelete(PermissionRequired, View):\n permission_required = 'orga.remove_question'\n\n def get_object(self) -> Question:\n return Question.all_objects.get(event=self.request.event, pk=self.kwargs.get('pk'))\n\n def dispatch(self, request, *args, **kwargs):\n super().dispatch(request, *args, **kwargs)\n question = self.get_object()\n\n try:\n with transaction.atomic():\n question.options.all().delete()\n question.delete()\n question.log_action('pretalx.question.delete', person=self.request.user, orga=True)\n messages.success(request, _('The question has been deleted.'))\n except ProtectedError:\n question.active = False\n question.save()\n messages.error(request, _('You cannot delete a question that has already been answered. We have deactivated the question instead.'))\n return redirect(self.request.event.cfp.urls.questions)\n\n\nclass CfPQuestionToggle(PermissionRequired, View):\n permission_required = 'orga.edit_question'\n\n def get_object(self) -> Question:\n return Question.all_objects.filter(event=self.request.event, pk=self.kwargs.get('pk')).first()\n\n def dispatch(self, request, *args, **kwargs):\n super().dispatch(request, *args, **kwargs)\n question = self.get_object()\n\n question.active = not question.active\n question.save(update_fields=['active'])\n return redirect(question.urls.base)\n\n\nclass SubmissionTypeList(PermissionRequired, ListView):\n template_name = 'orga/cfp/submission_type_view.html'\n context_object_name = 'types'\n permission_required = 'orga.view_submission_type'\n\n def get_permission_object(self):\n return self.request.event\n\n def get_queryset(self):\n return self.request.event.submission_types.all()\n\n\nclass SubmissionTypeDetail(PermissionRequired, ActionFromUrl, CreateOrUpdateView):\n model = SubmissionType\n form_class = SubmissionTypeForm\n template_name = 'orga/cfp/submission_type_form.html'\n permission_required = 'orga.edit_submission_type'\n write_permission_required = 'orga.edit_submission_type'\n\n def get_success_url(self) -> str:\n return self.request.event.cfp.urls.types\n\n def get_object(self):\n return self.request.event.submission_types.filter(pk=self.kwargs.get('pk')).first()\n\n def get_permission_object(self):\n return self.get_object() or self.request.event\n\n def form_valid(self, form):\n messages.success(self.request, 'The Submission Type has been saved.')\n form.instance.event = self.request.event\n ret = super().form_valid(form)\n if form.has_changed():\n action = 'pretalx.submission_type.' 
+ ('update' if self.object else 'create')\n form.instance.log_action(action, person=self.request.user, orga=True)\n return ret\n\n\nclass SubmissionTypeDefault(PermissionRequired, View):\n permission_required = 'orga.edit_submission_type'\n\n def get_object(self):\n return self.request.event.submission_types.get(pk=self.kwargs.get('pk'))\n\n def dispatch(self, request, *args, **kwargs):\n super().dispatch(request, *args, **kwargs)\n submission_type = self.get_object()\n self.request.event.cfp.default_type = submission_type\n self.request.event.cfp.save(update_fields=['default_type'])\n submission_type.log_action('pretalx.submission_type.make_default', person=self.request.user, orga=True)\n messages.success(request, _('The Submission Type has been made default.'))\n return redirect(self.request.event.cfp.urls.types)\n\n\nclass SubmissionTypeDelete(PermissionRequired, View):\n permission_required = 'orga.remove_submission_type'\n\n def get_object(self):\n return self.request.event.submission_types.get(pk=self.kwargs.get('pk'))\n\n def dispatch(self, request, *args, **kwargs):\n super().dispatch(request, *args, **kwargs)\n submission_type = self.get_object()\n\n if request.event.submission_types.count() == 1:\n messages.error(request, _('You cannot delete the only submission type. Try creating another one first!'))\n elif request.event.cfp.default_type == submission_type:\n messages.error(request, _('You cannot delete the default submission type. Make another type default first!'))\n else:\n try:\n submission_type.delete()\n request.event.log_action('pretalx.submission_type.delete', person=self.request.user, orga=True)\n messages.success(request, _('The Submission Type has been deleted.'))\n except ProtectedError: # TODO: show which/how many submissions are concerned\n messages.error(request, _('This Submission Type is in use in a submission and cannot be deleted.'))\n return redirect(self.request.event.cfp.urls.types)\n", "path": "src/pretalx/orga/views/cfp.py"}], "after_files": [{"content": "from csp.decorators import csp_update\nfrom django.contrib import messages\nfrom django.db import transaction\nfrom django.db.models.deletion import ProtectedError\nfrom django.forms.models import inlineformset_factory\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.functional import cached_property\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import ListView, TemplateView, UpdateView, View\n\nfrom pretalx.common.forms import I18nFormSet\nfrom pretalx.common.mixins.views import ActionFromUrl, PermissionRequired\nfrom pretalx.common.views import CreateOrUpdateView\nfrom pretalx.orga.forms import CfPForm, QuestionForm, SubmissionTypeForm\nfrom pretalx.orga.forms.cfp import AnswerOptionForm, CfPSettingsForm\nfrom pretalx.submission.models import (\n AnswerOption, CfP, Question, SubmissionType,\n)\n\n\nclass CfPTextDetail(PermissionRequired, ActionFromUrl, UpdateView):\n form_class = CfPForm\n model = CfP\n template_name = 'orga/cfp/text.html'\n permission_required = 'orga.edit_cfp'\n write_permission_required = 'orga.edit_cfp'\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['sform'] = self.sform\n return ctx\n\n @cached_property\n def sform(self):\n return CfPSettingsForm(\n read_only=(self._action == 'view'),\n locales=self.request.event.locales,\n obj=self.request.event,\n attribute_name='settings',\n data=self.request.POST if self.request.method 
== \"POST\" else None,\n prefix='settings'\n )\n\n def get_object(self):\n return self.request.event.cfp\n\n def get_success_url(self) -> str:\n return self.get_object().urls.text\n\n def form_valid(self, form):\n if not self.sform.is_valid():\n messages.error(self.request, _('We had trouble saving your input.'))\n return self.form_invalid(form)\n messages.success(self.request, 'The CfP update has been saved.')\n form.instance.event = self.request.event\n ret = super().form_valid(form)\n if form.has_changed():\n form.instance.log_action('pretalx.cfp.update', person=self.request.user, orga=True)\n self.sform.save()\n return ret\n\n\nclass CfPQuestionList(PermissionRequired, TemplateView):\n template_name = 'orga/cfp/question_view.html'\n permission_required = 'orga.view_question'\n\n def get_permission_object(self):\n return self.request.event\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n ctx['speaker_questions'] = Question.all_objects.filter(event=self.request.event, target='speaker')\n ctx['submission_questions'] = Question.all_objects.filter(event=self.request.event, target='submission')\n return ctx\n\n\n@method_decorator(csp_update(SCRIPT_SRC=\"'self' 'unsafe-inline'\"), name='dispatch')\nclass CfPQuestionDetail(PermissionRequired, ActionFromUrl, CreateOrUpdateView):\n model = Question\n form_class = QuestionForm\n permission_required = 'orga.edit_question'\n write_permission_required = 'orga.edit_question'\n\n def get_template_names(self):\n action = self.request.path.lstrip('/').rpartition('/')[2]\n if action in ('edit', 'new'):\n return 'orga/cfp/question_form.html'\n return 'orga/cfp/question_detail.html'\n\n def get_permission_object(self):\n return self.get_object() or self.request.event\n\n def get_object(self) -> Question:\n return Question.all_objects.filter(event=self.request.event, pk=self.kwargs.get('pk')).first()\n\n @cached_property\n def formset(self):\n formset_class = inlineformset_factory(\n Question, AnswerOption, form=AnswerOptionForm, formset=I18nFormSet,\n can_delete=True, extra=0,\n )\n return formset_class(\n self.request.POST if self.request.method == 'POST' else None,\n queryset=AnswerOption.objects.filter(question=self.get_object()) if self.get_object() else AnswerOption.objects.none(),\n event=self.request.event\n )\n\n def save_formset(self, obj):\n if self.formset.is_valid():\n for form in self.formset.initial_forms:\n if form in self.formset.deleted_forms:\n if not form.instance.pk:\n continue\n obj.log_action(\n 'pretalx.question.option.delete', person=self.request.user, orga=True, data={\n 'id': form.instance.pk\n }\n )\n form.instance.delete()\n form.instance.pk = None\n elif form.has_changed():\n form.instance.question = obj\n form.save()\n change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}\n change_data['id'] = form.instance.pk\n obj.log_action(\n 'pretalx.question.option.update',\n person=self.request.user, orga=True, data=change_data\n )\n\n for form in self.formset.extra_forms:\n if not form.has_changed():\n continue\n if self.formset._should_delete_form(form):\n continue\n form.instance.question = obj\n form.save()\n change_data = {k: form.cleaned_data.get(k) for k in form.changed_data}\n change_data['id'] = form.instance.pk\n obj.log_action(\n 'pretalx.question.option.create',\n person=self.request.user, orga=True, data=change_data\n )\n\n return True\n return False\n\n def get_context_data(self, *args, **kwargs):\n ctx = super().get_context_data(*args, **kwargs)\n 
ctx['formset'] = self.formset\n ctx['question'] = self.get_object()\n return ctx\n\n def get_form_kwargs(self, *args, **kwargs):\n kwargs = super().get_form_kwargs(*args, **kwargs)\n if not self.get_object():\n initial = kwargs['initial'] or dict()\n initial['target'] = self.request.GET.get('type')\n kwargs['initial'] = initial\n return kwargs\n\n def get_success_url(self) -> str:\n obj = self.get_object() or self.instance\n return obj.urls.base\n\n @transaction.atomic\n def form_valid(self, form):\n form.instance.event = self.request.event\n self.instance = form.instance\n ret = super().form_valid(form)\n if form.cleaned_data.get('variant') in ('choices', 'multiple_choice'):\n result = self.save_formset(self.instance)\n if not result:\n return self.get(self.request, *self.args, **self.kwargs)\n if form.has_changed():\n action = 'pretalx.question.' + ('update' if self.object else 'create')\n form.instance.log_action(action, person=self.request.user, orga=True)\n messages.success(self.request, 'The question has been saved.')\n return ret\n\n\nclass CfPQuestionDelete(PermissionRequired, View):\n permission_required = 'orga.remove_question'\n\n def get_object(self) -> Question:\n return Question.all_objects.get(event=self.request.event, pk=self.kwargs.get('pk'))\n\n def dispatch(self, request, *args, **kwargs):\n super().dispatch(request, *args, **kwargs)\n question = self.get_object()\n\n try:\n with transaction.atomic():\n question.options.all().delete()\n question.delete()\n question.log_action('pretalx.question.delete', person=self.request.user, orga=True)\n messages.success(request, _('The question has been deleted.'))\n except ProtectedError:\n question.active = False\n question.save()\n messages.error(request, _('You cannot delete a question that has already been answered. 
We have deactivated the question instead.'))\n return redirect(self.request.event.cfp.urls.questions)\n\n\nclass CfPQuestionToggle(PermissionRequired, View):\n permission_required = 'orga.edit_question'\n\n def get_object(self) -> Question:\n return Question.all_objects.filter(event=self.request.event, pk=self.kwargs.get('pk')).first()\n\n def dispatch(self, request, *args, **kwargs):\n super().dispatch(request, *args, **kwargs)\n question = self.get_object()\n\n question.active = not question.active\n question.save(update_fields=['active'])\n return redirect(question.urls.base)\n\n\nclass SubmissionTypeList(PermissionRequired, ListView):\n template_name = 'orga/cfp/submission_type_view.html'\n context_object_name = 'types'\n permission_required = 'orga.view_submission_type'\n\n def get_permission_object(self):\n return self.request.event\n\n def get_queryset(self):\n return self.request.event.submission_types.all()\n\n\nclass SubmissionTypeDetail(PermissionRequired, ActionFromUrl, CreateOrUpdateView):\n model = SubmissionType\n form_class = SubmissionTypeForm\n template_name = 'orga/cfp/submission_type_form.html'\n permission_required = 'orga.edit_submission_type'\n write_permission_required = 'orga.edit_submission_type'\n\n def get_success_url(self) -> str:\n return self.request.event.cfp.urls.types\n\n def get_object(self):\n return self.request.event.submission_types.filter(pk=self.kwargs.get('pk')).first()\n\n def get_permission_object(self):\n return self.get_object() or self.request.event\n\n def form_valid(self, form):\n messages.success(self.request, 'The Submission Type has been saved.')\n form.instance.event = self.request.event\n ret = super().form_valid(form)\n if form.has_changed():\n action = 'pretalx.submission_type.' + ('update' if self.object else 'create')\n form.instance.log_action(action, person=self.request.user, orga=True)\n return ret\n\n\nclass SubmissionTypeDefault(PermissionRequired, View):\n permission_required = 'orga.edit_submission_type'\n\n def get_object(self):\n return self.request.event.submission_types.get(pk=self.kwargs.get('pk'))\n\n def dispatch(self, request, *args, **kwargs):\n super().dispatch(request, *args, **kwargs)\n submission_type = self.get_object()\n self.request.event.cfp.default_type = submission_type\n self.request.event.cfp.save(update_fields=['default_type'])\n submission_type.log_action('pretalx.submission_type.make_default', person=self.request.user, orga=True)\n messages.success(request, _('The Submission Type has been made default.'))\n return redirect(self.request.event.cfp.urls.types)\n\n\nclass SubmissionTypeDelete(PermissionRequired, View):\n permission_required = 'orga.remove_submission_type'\n\n def get_object(self):\n return self.request.event.submission_types.get(pk=self.kwargs.get('pk'))\n\n def dispatch(self, request, *args, **kwargs):\n super().dispatch(request, *args, **kwargs)\n submission_type = self.get_object()\n\n if request.event.submission_types.count() == 1:\n messages.error(request, _('You cannot delete the only submission type. Try creating another one first!'))\n elif request.event.cfp.default_type == submission_type:\n messages.error(request, _('You cannot delete the default submission type. 
Make another type default first!'))\n else:\n try:\n submission_type.delete()\n request.event.log_action('pretalx.submission_type.delete', person=self.request.user, orga=True)\n messages.success(request, _('The Submission Type has been deleted.'))\n except ProtectedError: # TODO: show which/how many submissions are concerned\n messages.error(request, _('This Submission Type is in use in a submission and cannot be deleted.'))\n return redirect(self.request.event.cfp.urls.types)\n", "path": "src/pretalx/orga/views/cfp.py"}]}
| 3,744 | 151 |
gh_patches_debug_2462
|
rasdani/github-patches
|
git_diff
|
ansible-collections__community.aws-1206
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ec2_customer_gateway: bgp_asn is not required
### Summary
The ec2_customer_gateway module has incorrect documentation for the bgp_asn parameter.
It says the ASN must be passed when state=present, but the code defaults to 25000 if the parameter is absent. See the ensure_cgw_present() method:
```
def ensure_cgw_present(self, bgp_asn, ip_address):
if not bgp_asn:
bgp_asn = 65000
response = self.ec2.create_customer_gateway(
DryRun=False,
Type='ipsec.1',
PublicIp=ip_address,
BgpAsn=bgp_asn,
)
        return response
```
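
For concreteness, a minimal standalone sketch of that guard (the `resolve_bgp_asn` helper below is hypothetical, written only to illustrate the fallback behaviour in isolation, not part of the module):

```python
# Hypothetical helper mirroring the `if not bgp_asn` guard above:
# any falsy value (omitted, None, or 0) silently falls back to 65000.
def resolve_bgp_asn(bgp_asn=None):
    if not bgp_asn:
        bgp_asn = 65000
    return bgp_asn

assert resolve_bgp_asn() == 65000       # omitted -> default, no error raised
assert resolve_bgp_asn(None) == 65000   # explicit None -> default
assert resolve_bgp_asn(0) == 65000      # falsy 0 -> default as well
assert resolve_bgp_asn(12345) == 12345  # a real ASN is passed through
```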
### Issue Type
Documentation Report
### Component Name
ec2_customer_gateway
### Ansible Version
```console (paste below)
$ ansible --version
ansible [core 2.12.4]
config file = None
configured module search path = ['/home/neil/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/lib/python3.10/site-packages/ansible
ansible collection location = /home/neil/.ansible/collections:/usr/share/ansible/collections
executable location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/bin/ansible
python version = 3.10.1 (main, Jan 10 2022, 00:00:00) [GCC 11.2.1 20211203 (Red Hat 11.2.1-7)]
jinja version = 3.1.1
libyaml = True
```
### Collection Versions
```console (paste below)
$ ansible-galaxy collection list
```
### Configuration
```console (paste below)
$ ansible-config dump --only-changed
```
### OS / Environment
main branch, as of 2022-04-18.
### Additional Information
Suggested rewording:
```
options:
bgp_asn:
description:
- Border Gateway Protocol (BGP) Autonomous System Number (ASN), defaults to 25000.
type: int
```
### Code of Conduct
- [X] I agree to follow the Ansible Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugins/modules/ec2_customer_gateway.py`
Content:
```
1 #!/usr/bin/python
2 #
3 # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
4
5 from __future__ import absolute_import, division, print_function
6 __metaclass__ = type
7
8
9 DOCUMENTATION = '''
10 ---
11 module: ec2_customer_gateway
12 version_added: 1.0.0
13 short_description: Manage an AWS customer gateway
14 description:
15 - Manage an AWS customer gateway.
16 author: Michael Baydoun (@MichaelBaydoun)
17 notes:
18 - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the
19 first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent
20 requests do not create new customer gateway resources.
21 - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use
22 customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.
23 options:
24 bgp_asn:
25 description:
26 - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present).
27 type: int
28 ip_address:
29 description:
30 - Internet-routable IP address for customers gateway, must be a static address.
31 required: true
32 type: str
33 name:
34 description:
35 - Name of the customer gateway.
36 required: true
37 type: str
38 routing:
39 description:
40 - The type of routing.
41 choices: ['static', 'dynamic']
42 default: dynamic
43 type: str
44 state:
45 description:
46 - Create or terminate the Customer Gateway.
47 default: present
48 choices: [ 'present', 'absent' ]
49 type: str
50 extends_documentation_fragment:
51 - amazon.aws.aws
52 - amazon.aws.ec2
53
54 '''
55
56 EXAMPLES = '''
57 - name: Create Customer Gateway
58 community.aws.ec2_customer_gateway:
59 bgp_asn: 12345
60 ip_address: 1.2.3.4
61 name: IndianapolisOffice
62 region: us-east-1
63 register: cgw
64
65 - name: Delete Customer Gateway
66 community.aws.ec2_customer_gateway:
67 ip_address: 1.2.3.4
68 name: IndianapolisOffice
69 state: absent
70 region: us-east-1
71 register: cgw
72 '''
73
74 RETURN = '''
75 gateway.customer_gateways:
76 description: details about the gateway that was created.
77 returned: success
78 type: complex
79 contains:
80 bgp_asn:
81 description: The Border Gateway Autonomous System Number.
82 returned: when exists and gateway is available.
83 sample: 65123
84 type: str
85 customer_gateway_id:
86 description: gateway id assigned by amazon.
87 returned: when exists and gateway is available.
88 sample: cgw-cb6386a2
89 type: str
90 ip_address:
91 description: ip address of your gateway device.
92 returned: when exists and gateway is available.
93 sample: 1.2.3.4
94 type: str
95 state:
96 description: state of gateway.
97 returned: when gateway exists and is available.
98 sample: available
99 type: str
100 tags:
101 description: Any tags on the gateway.
102 returned: when gateway exists and is available, and when tags exist.
103 type: list
104 type:
105 description: encryption type.
106 returned: when gateway exists and is available.
107 sample: ipsec.1
108 type: str
109 '''
110
111 try:
112 import botocore
113 except ImportError:
114 pass # Handled by AnsibleAWSModule
115
116 from ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict
117
118 from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
119 from ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry
120
121
122 class Ec2CustomerGatewayManager:
123
124 def __init__(self, module):
125 self.module = module
126
127 try:
128 self.ec2 = module.client('ec2')
129 except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:
130 module.fail_json_aws(e, msg='Failed to connect to AWS')
131
132 @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState'])
133 def ensure_cgw_absent(self, gw_id):
134 response = self.ec2.delete_customer_gateway(
135 DryRun=False,
136 CustomerGatewayId=gw_id
137 )
138 return response
139
140 def ensure_cgw_present(self, bgp_asn, ip_address):
141 if not bgp_asn:
142 bgp_asn = 65000
143 response = self.ec2.create_customer_gateway(
144 DryRun=False,
145 Type='ipsec.1',
146 PublicIp=ip_address,
147 BgpAsn=bgp_asn,
148 )
149 return response
150
151 def tag_cgw_name(self, gw_id, name):
152 response = self.ec2.create_tags(
153 DryRun=False,
154 Resources=[
155 gw_id,
156 ],
157 Tags=[
158 {
159 'Key': 'Name',
160 'Value': name
161 },
162 ]
163 )
164 return response
165
166 def describe_gateways(self, ip_address):
167 response = self.ec2.describe_customer_gateways(
168 DryRun=False,
169 Filters=[
170 {
171 'Name': 'state',
172 'Values': [
173 'available',
174 ]
175 },
176 {
177 'Name': 'ip-address',
178 'Values': [
179 ip_address,
180 ]
181 }
182 ]
183 )
184 return response
185
186
187 def main():
188 argument_spec = dict(
189 bgp_asn=dict(required=False, type='int'),
190 ip_address=dict(required=True),
191 name=dict(required=True),
192 routing=dict(default='dynamic', choices=['dynamic', 'static']),
193 state=dict(default='present', choices=['present', 'absent']),
194 )
195
196 module = AnsibleAWSModule(
197 argument_spec=argument_spec,
198 supports_check_mode=True,
199 required_if=[
200 ('routing', 'dynamic', ['bgp_asn'])
201 ]
202 )
203
204 gw_mgr = Ec2CustomerGatewayManager(module)
205
206 name = module.params.get('name')
207
208 existing = gw_mgr.describe_gateways(module.params['ip_address'])
209
210 results = dict(changed=False)
211 if module.params['state'] == 'present':
212 if existing['CustomerGateways']:
213 existing['CustomerGateway'] = existing['CustomerGateways'][0]
214 results['gateway'] = existing
215 if existing['CustomerGateway']['Tags']:
216 tag_array = existing['CustomerGateway']['Tags']
217 for key, value in enumerate(tag_array):
218 if value['Key'] == 'Name':
219 current_name = value['Value']
220 if current_name != name:
221 results['name'] = gw_mgr.tag_cgw_name(
222 results['gateway']['CustomerGateway']['CustomerGatewayId'],
223 module.params['name'],
224 )
225 results['changed'] = True
226 else:
227 if not module.check_mode:
228 results['gateway'] = gw_mgr.ensure_cgw_present(
229 module.params['bgp_asn'],
230 module.params['ip_address'],
231 )
232 results['name'] = gw_mgr.tag_cgw_name(
233 results['gateway']['CustomerGateway']['CustomerGatewayId'],
234 module.params['name'],
235 )
236 results['changed'] = True
237
238 elif module.params['state'] == 'absent':
239 if existing['CustomerGateways']:
240 existing['CustomerGateway'] = existing['CustomerGateways'][0]
241 results['gateway'] = existing
242 if not module.check_mode:
243 results['gateway'] = gw_mgr.ensure_cgw_absent(
244 existing['CustomerGateway']['CustomerGatewayId']
245 )
246 results['changed'] = True
247
248 pretty_results = camel_dict_to_snake_dict(results)
249 module.exit_json(**pretty_results)
250
251
252 if __name__ == '__main__':
253 main()
254
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugins/modules/ec2_customer_gateway.py b/plugins/modules/ec2_customer_gateway.py
--- a/plugins/modules/ec2_customer_gateway.py
+++ b/plugins/modules/ec2_customer_gateway.py
@@ -23,7 +23,8 @@
options:
bgp_asn:
description:
- - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present).
+ - Border Gateway Protocol (BGP) Autonomous System Number (ASN).
+ - Defaults to C(65000) if not specified when I(state=present).
type: int
ip_address:
description:
|
{"golden_diff": "diff --git a/plugins/modules/ec2_customer_gateway.py b/plugins/modules/ec2_customer_gateway.py\n--- a/plugins/modules/ec2_customer_gateway.py\n+++ b/plugins/modules/ec2_customer_gateway.py\n@@ -23,7 +23,8 @@\n options:\n bgp_asn:\n description:\n- - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present).\n+ - Border Gateway Protocol (BGP) Autonomous System Number (ASN).\n+ - Defaults to C(65000) if not specified when I(state=present).\n type: int\n ip_address:\n description:\n", "issue": "ec2_customer_gateway: bgp_asn is not required\n### Summary\n\nThe ec2_customer_gateway module has incorrect documentation for the bgp_asn parameter.\r\n\r\nIt says the ASN must be passed when state=present, but the code defaults to 25000 if the parameter is absent. See the ensure_cgw_present() method:\r\n\r\n```\r\n def ensure_cgw_present(self, bgp_asn, ip_address):\r\n if not bgp_asn:\r\n bgp_asn = 65000\r\n response = self.ec2.create_customer_gateway(\r\n DryRun=False,\r\n Type='ipsec.1',\r\n PublicIp=ip_address,\r\n BgpAsn=bgp_asn,\r\n )\r\n return response\n\n### Issue Type\n\nDocumentation Report\n\n### Component Name\n\nec2_customer_gateway\n\n### Ansible Version\n\n```console (paste below)\r\n$ ansible --version\r\nansible [core 2.12.4]\r\n config file = None\r\n configured module search path = ['/home/neil/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']\r\n ansible python module location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/lib/python3.10/site-packages/ansible\r\n ansible collection location = /home/neil/.ansible/collections:/usr/share/ansible/collections\r\n executable location = /home/neil/.local/share/virtualenvs/community.aws-uRL047Ho/bin/ansible\r\n python version = 3.10.1 (main, Jan 10 2022, 00:00:00) [GCC 11.2.1 20211203 (Red Hat 11.2.1-7)]\r\n jinja version = 3.1.1\r\n libyaml = True\r\n```\r\n\n\n### Collection Versions\n\n```console (paste below)\r\n$ ansible-galaxy collection list\r\n```\r\n\n\n### Configuration\n\n```console (paste below)\r\n$ ansible-config dump --only-changed\r\n\r\n```\r\n\n\n### OS / Environment\n\nmain branch, as of 2022-04-18.\n\n### Additional Information\n\nSuggested rewording:\r\n\r\n```\r\noptions:\r\n bgp_asn:\r\n description:\r\n - Border Gateway Protocol (BGP) Autonomous System Number (ASN), defaults to 25000.\r\n type: int\r\n```\n\n### Code of Conduct\n\n- [X] I agree to follow the Ansible Code of Conduct\n", "before_files": [{"content": "#!/usr/bin/python\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_customer_gateway\nversion_added: 1.0.0\nshort_description: Manage an AWS customer gateway\ndescription:\n - Manage an AWS customer gateway.\nauthor: Michael Baydoun (@MichaelBaydoun)\nnotes:\n - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the\n first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent\n requests do not create new customer gateway resources.\n - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use\n customer_gateway. 
See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.\noptions:\n bgp_asn:\n description:\n - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when I(state=present).\n type: int\n ip_address:\n description:\n - Internet-routable IP address for customers gateway, must be a static address.\n required: true\n type: str\n name:\n description:\n - Name of the customer gateway.\n required: true\n type: str\n routing:\n description:\n - The type of routing.\n choices: ['static', 'dynamic']\n default: dynamic\n type: str\n state:\n description:\n - Create or terminate the Customer Gateway.\n default: present\n choices: [ 'present', 'absent' ]\n type: str\nextends_documentation_fragment:\n- amazon.aws.aws\n- amazon.aws.ec2\n\n'''\n\nEXAMPLES = '''\n- name: Create Customer Gateway\n community.aws.ec2_customer_gateway:\n bgp_asn: 12345\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n region: us-east-1\n register: cgw\n\n- name: Delete Customer Gateway\n community.aws.ec2_customer_gateway:\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n state: absent\n region: us-east-1\n register: cgw\n'''\n\nRETURN = '''\ngateway.customer_gateways:\n description: details about the gateway that was created.\n returned: success\n type: complex\n contains:\n bgp_asn:\n description: The Border Gateway Autonomous System Number.\n returned: when exists and gateway is available.\n sample: 65123\n type: str\n customer_gateway_id:\n description: gateway id assigned by amazon.\n returned: when exists and gateway is available.\n sample: cgw-cb6386a2\n type: str\n ip_address:\n description: ip address of your gateway device.\n returned: when exists and gateway is available.\n sample: 1.2.3.4\n type: str\n state:\n description: state of gateway.\n returned: when gateway exists and is available.\n sample: available\n type: str\n tags:\n description: Any tags on the gateway.\n returned: when gateway exists and is available, and when tags exist.\n type: list\n type:\n description: encryption type.\n returned: when gateway exists and is available.\n sample: ipsec.1\n type: str\n'''\n\ntry:\n import botocore\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry\n\n\nclass Ec2CustomerGatewayManager:\n\n def __init__(self, module):\n self.module = module\n\n try:\n self.ec2 = module.client('ec2')\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg='Failed to connect to AWS')\n\n @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState'])\n def ensure_cgw_absent(self, gw_id):\n response = self.ec2.delete_customer_gateway(\n DryRun=False,\n CustomerGatewayId=gw_id\n )\n return response\n\n def ensure_cgw_present(self, bgp_asn, ip_address):\n if not bgp_asn:\n bgp_asn = 65000\n response = self.ec2.create_customer_gateway(\n DryRun=False,\n Type='ipsec.1',\n PublicIp=ip_address,\n BgpAsn=bgp_asn,\n )\n return response\n\n def tag_cgw_name(self, gw_id, name):\n response = self.ec2.create_tags(\n DryRun=False,\n Resources=[\n gw_id,\n ],\n Tags=[\n {\n 'Key': 'Name',\n 'Value': name\n },\n ]\n )\n return response\n\n def describe_gateways(self, ip_address):\n response = self.ec2.describe_customer_gateways(\n DryRun=False,\n 
Filters=[\n {\n 'Name': 'state',\n 'Values': [\n 'available',\n ]\n },\n {\n 'Name': 'ip-address',\n 'Values': [\n ip_address,\n ]\n }\n ]\n )\n return response\n\n\ndef main():\n argument_spec = dict(\n bgp_asn=dict(required=False, type='int'),\n ip_address=dict(required=True),\n name=dict(required=True),\n routing=dict(default='dynamic', choices=['dynamic', 'static']),\n state=dict(default='present', choices=['present', 'absent']),\n )\n\n module = AnsibleAWSModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_if=[\n ('routing', 'dynamic', ['bgp_asn'])\n ]\n )\n\n gw_mgr = Ec2CustomerGatewayManager(module)\n\n name = module.params.get('name')\n\n existing = gw_mgr.describe_gateways(module.params['ip_address'])\n\n results = dict(changed=False)\n if module.params['state'] == 'present':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if existing['CustomerGateway']['Tags']:\n tag_array = existing['CustomerGateway']['Tags']\n for key, value in enumerate(tag_array):\n if value['Key'] == 'Name':\n current_name = value['Value']\n if current_name != name:\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n else:\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_present(\n module.params['bgp_asn'],\n module.params['ip_address'],\n )\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n\n elif module.params['state'] == 'absent':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_absent(\n existing['CustomerGateway']['CustomerGatewayId']\n )\n results['changed'] = True\n\n pretty_results = camel_dict_to_snake_dict(results)\n module.exit_json(**pretty_results)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/ec2_customer_gateway.py"}], "after_files": [{"content": "#!/usr/bin/python\n#\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\n\nDOCUMENTATION = '''\n---\nmodule: ec2_customer_gateway\nversion_added: 1.0.0\nshort_description: Manage an AWS customer gateway\ndescription:\n - Manage an AWS customer gateway.\nauthor: Michael Baydoun (@MichaelBaydoun)\nnotes:\n - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the\n first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent\n requests do not create new customer gateway resources.\n - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use\n customer_gateway. 
See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.\noptions:\n bgp_asn:\n description:\n - Border Gateway Protocol (BGP) Autonomous System Number (ASN).\n - Defaults to C(65000) if not specified when I(state=present).\n type: int\n ip_address:\n description:\n - Internet-routable IP address for customers gateway, must be a static address.\n required: true\n type: str\n name:\n description:\n - Name of the customer gateway.\n required: true\n type: str\n routing:\n description:\n - The type of routing.\n choices: ['static', 'dynamic']\n default: dynamic\n type: str\n state:\n description:\n - Create or terminate the Customer Gateway.\n default: present\n choices: [ 'present', 'absent' ]\n type: str\nextends_documentation_fragment:\n- amazon.aws.aws\n- amazon.aws.ec2\n\n'''\n\nEXAMPLES = '''\n- name: Create Customer Gateway\n community.aws.ec2_customer_gateway:\n bgp_asn: 12345\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n region: us-east-1\n register: cgw\n\n- name: Delete Customer Gateway\n community.aws.ec2_customer_gateway:\n ip_address: 1.2.3.4\n name: IndianapolisOffice\n state: absent\n region: us-east-1\n register: cgw\n'''\n\nRETURN = '''\ngateway.customer_gateways:\n description: details about the gateway that was created.\n returned: success\n type: complex\n contains:\n bgp_asn:\n description: The Border Gateway Autonomous System Number.\n returned: when exists and gateway is available.\n sample: 65123\n type: str\n customer_gateway_id:\n description: gateway id assigned by amazon.\n returned: when exists and gateway is available.\n sample: cgw-cb6386a2\n type: str\n ip_address:\n description: ip address of your gateway device.\n returned: when exists and gateway is available.\n sample: 1.2.3.4\n type: str\n state:\n description: state of gateway.\n returned: when gateway exists and is available.\n sample: available\n type: str\n tags:\n description: Any tags on the gateway.\n returned: when gateway exists and is available, and when tags exist.\n type: list\n type:\n description: encryption type.\n returned: when gateway exists and is available.\n sample: ipsec.1\n type: str\n'''\n\ntry:\n import botocore\nexcept ImportError:\n pass # Handled by AnsibleAWSModule\n\nfrom ansible.module_utils.common.dict_transformations import camel_dict_to_snake_dict\n\nfrom ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule\nfrom ansible_collections.amazon.aws.plugins.module_utils.ec2 import AWSRetry\n\n\nclass Ec2CustomerGatewayManager:\n\n def __init__(self, module):\n self.module = module\n\n try:\n self.ec2 = module.client('ec2')\n except (botocore.exceptions.ClientError, botocore.exceptions.BotoCoreError) as e:\n module.fail_json_aws(e, msg='Failed to connect to AWS')\n\n @AWSRetry.jittered_backoff(delay=2, max_delay=30, retries=6, catch_extra_error_codes=['IncorrectState'])\n def ensure_cgw_absent(self, gw_id):\n response = self.ec2.delete_customer_gateway(\n DryRun=False,\n CustomerGatewayId=gw_id\n )\n return response\n\n def ensure_cgw_present(self, bgp_asn, ip_address):\n if not bgp_asn:\n bgp_asn = 65000\n response = self.ec2.create_customer_gateway(\n DryRun=False,\n Type='ipsec.1',\n PublicIp=ip_address,\n BgpAsn=bgp_asn,\n )\n return response\n\n def tag_cgw_name(self, gw_id, name):\n response = self.ec2.create_tags(\n DryRun=False,\n Resources=[\n gw_id,\n ],\n Tags=[\n {\n 'Key': 'Name',\n 'Value': name\n },\n ]\n )\n return response\n\n def describe_gateways(self, ip_address):\n response = 
self.ec2.describe_customer_gateways(\n DryRun=False,\n Filters=[\n {\n 'Name': 'state',\n 'Values': [\n 'available',\n ]\n },\n {\n 'Name': 'ip-address',\n 'Values': [\n ip_address,\n ]\n }\n ]\n )\n return response\n\n\ndef main():\n argument_spec = dict(\n bgp_asn=dict(required=False, type='int'),\n ip_address=dict(required=True),\n name=dict(required=True),\n routing=dict(default='dynamic', choices=['dynamic', 'static']),\n state=dict(default='present', choices=['present', 'absent']),\n )\n\n module = AnsibleAWSModule(\n argument_spec=argument_spec,\n supports_check_mode=True,\n required_if=[\n ('routing', 'dynamic', ['bgp_asn'])\n ]\n )\n\n gw_mgr = Ec2CustomerGatewayManager(module)\n\n name = module.params.get('name')\n\n existing = gw_mgr.describe_gateways(module.params['ip_address'])\n\n results = dict(changed=False)\n if module.params['state'] == 'present':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if existing['CustomerGateway']['Tags']:\n tag_array = existing['CustomerGateway']['Tags']\n for key, value in enumerate(tag_array):\n if value['Key'] == 'Name':\n current_name = value['Value']\n if current_name != name:\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n else:\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_present(\n module.params['bgp_asn'],\n module.params['ip_address'],\n )\n results['name'] = gw_mgr.tag_cgw_name(\n results['gateway']['CustomerGateway']['CustomerGatewayId'],\n module.params['name'],\n )\n results['changed'] = True\n\n elif module.params['state'] == 'absent':\n if existing['CustomerGateways']:\n existing['CustomerGateway'] = existing['CustomerGateways'][0]\n results['gateway'] = existing\n if not module.check_mode:\n results['gateway'] = gw_mgr.ensure_cgw_absent(\n existing['CustomerGateway']['CustomerGatewayId']\n )\n results['changed'] = True\n\n pretty_results = camel_dict_to_snake_dict(results)\n module.exit_json(**pretty_results)\n\n\nif __name__ == '__main__':\n main()\n", "path": "plugins/modules/ec2_customer_gateway.py"}]}
| 3,184 | 136 |
gh_patches_debug_18332
|
rasdani/github-patches
|
git_diff
|
pyjanitor-devs__pyjanitor-569
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[INF] Make requirements.txt smaller
Follow-up from #257
The idea is to have feature-specific requirements.txt. Such as for biology, specifically requires biopython.
so we can install the package per feature as needed, such as with extra biology. It goes `pip install "pyjanitor[biology]"`
The example of such implementations in `setup.py` is available at this link: https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies
[INF] Make requirements.txt smaller
Follow-up from #257
The idea is to have feature-specific requirements.txt. Such as for biology, specifically requires biopython.
so we can install the package per feature as needed, such as with extra biology. It goes `pip install "pyjanitor[biology]"`
The example of such implementations in `setup.py` is available at this link: https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies
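
As a rough sketch of the kind of declaration this implies (the extras names and package lists below are illustrative assumptions, not a final selection), `setup.py` could expose optional dependency groups via `extras_require`:

```python
# Illustrative sketch only: extras names and package lists are assumptions.
from setuptools import setup

extras_require = {
    "biology": ["biopython"],
    "spark": ["pyspark"],
    "engineering": ["unyt"],
}
# Convenience "all" extra that unions the individual groups.
extras_require["all"] = sorted({pkg for pkgs in extras_require.values() for pkg in pkgs})

setup(
    name="pyjanitor",
    packages=["janitor"],
    extras_require=extras_require,  # enables `pip install "pyjanitor[biology]"`
)
```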
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import re
2 from pathlib import Path
3
4 from setuptools import setup
5
6
7 def requirements():
8 with open("requirements.txt", "r+") as f:
9 return f.read()
10
11
12 def generate_long_description() -> str:
13 """
14 Extra chunks from README for PyPI description.
15
16 Target chunks must be contained within `.. pypi-doc` pair comments,
17 so there must be an even number of comments in README.
18
19 :returns: Extracted description from README
20
21 """
22 # Read the contents of README file
23 this_directory = Path(__file__).parent
24 with open(this_directory / "README.rst", encoding="utf-8") as f:
25 readme = f.read()
26
27 # Find pypi-doc comments in README
28 indices = [m.start() for m in re.finditer(".. pypi-doc", readme)]
29 if len(indices) % 2 != 0:
30 raise Exception("Odd number of `.. pypi-doc` comments in README")
31
32 # Loop through pairs of comments and save text between pairs
33 long_description = ""
34 for i in range(0, len(indices), 2):
35 start_index = indices[i] + 11
36 end_index = indices[i + 1]
37 long_description += readme[start_index:end_index]
38 return long_description
39
40
41 setup(
42 name="pyjanitor",
43 version="0.18.2",
44 description="Tools for cleaning pandas DataFrames",
45 author="Eric J. Ma",
46 author_email="[email protected]",
47 url="https://github.com/ericmjl/pyjanitor",
48 packages=["janitor"],
49 install_requires=requirements(),
50 python_requires=">=3.6",
51 long_description=generate_long_description(),
52 long_description_content_type="text/x-rst",
53 )
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -38,6 +38,12 @@
return long_description
+extra_spark = ["pyspark"]
+extra_biology = ["biopython"]
+extra_chemistry = ["rdkit"]
+extra_engineering = ["unyt"]
+extra_all = extra_biology + extra_engineering + extra_spark
+
setup(
name="pyjanitor",
version="0.18.2",
@@ -47,6 +53,14 @@
url="https://github.com/ericmjl/pyjanitor",
packages=["janitor"],
install_requires=requirements(),
+ extras_require={
+ "all": extra_all,
+ "biology": extra_biology,
+ # "chemistry": extra_chemistry, should be inserted once rdkit
+ # fixes https://github.com/rdkit/rdkit/issues/1812
+ "engineering": extra_engineering,
+ "spark": extra_spark,
+ },
python_requires=">=3.6",
long_description=generate_long_description(),
long_description_content_type="text/x-rst",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -38,6 +38,12 @@\n return long_description\n \n \n+extra_spark = [\"pyspark\"]\n+extra_biology = [\"biopython\"]\n+extra_chemistry = [\"rdkit\"]\n+extra_engineering = [\"unyt\"]\n+extra_all = extra_biology + extra_engineering + extra_spark\n+\n setup(\n name=\"pyjanitor\",\n version=\"0.18.2\",\n@@ -47,6 +53,14 @@\n url=\"https://github.com/ericmjl/pyjanitor\",\n packages=[\"janitor\"],\n install_requires=requirements(),\n+ extras_require={\n+ \"all\": extra_all,\n+ \"biology\": extra_biology,\n+ # \"chemistry\": extra_chemistry, should be inserted once rdkit\n+ # fixes https://github.com/rdkit/rdkit/issues/1812\n+ \"engineering\": extra_engineering,\n+ \"spark\": extra_spark,\n+ },\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n", "issue": "[INF] Make requirements.txt smaller\nFollow-up from #257 \r\n\r\nThe idea is to have feature-specific requirements.txt. Such as for biology, specifically requires biopython.\r\n\r\nso we can install the package per feature as needed, such as with extra biology. It goes `pip install \"pyjanitor[biology]\"`\r\n\r\nThe example of such implementations in `setup.py` is available at this link: https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies\n[INF] Make requirements.txt smaller\nFollow-up from #257 \r\n\r\nThe idea is to have feature-specific requirements.txt. Such as for biology, specifically requires biopython.\r\n\r\nso we can install the package per feature as needed, such as with extra biology. It goes `pip install \"pyjanitor[biology]\"`\r\n\r\nThe example of such implementations in `setup.py` is available at this link: https://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-extras-optional-features-with-their-own-dependencies\n", "before_files": [{"content": "import re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef requirements():\n with open(\"requirements.txt\", \"r+\") as f:\n return f.read()\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `.. pypi-doc` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README\n\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n if len(indices) % 2 != 0:\n raise Exception(\"Odd number of `.. pypi-doc` comments in README\")\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + 11\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.18.2\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. 
Ma\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ericmjl/pyjanitor\",\n packages=[\"janitor\"],\n install_requires=requirements(),\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "import re\nfrom pathlib import Path\n\nfrom setuptools import setup\n\n\ndef requirements():\n with open(\"requirements.txt\", \"r+\") as f:\n return f.read()\n\n\ndef generate_long_description() -> str:\n \"\"\"\n Extra chunks from README for PyPI description.\n\n Target chunks must be contained within `.. pypi-doc` pair comments,\n so there must be an even number of comments in README.\n\n :returns: Extracted description from README\n\n \"\"\"\n # Read the contents of README file\n this_directory = Path(__file__).parent\n with open(this_directory / \"README.rst\", encoding=\"utf-8\") as f:\n readme = f.read()\n\n # Find pypi-doc comments in README\n indices = [m.start() for m in re.finditer(\".. pypi-doc\", readme)]\n if len(indices) % 2 != 0:\n raise Exception(\"Odd number of `.. pypi-doc` comments in README\")\n\n # Loop through pairs of comments and save text between pairs\n long_description = \"\"\n for i in range(0, len(indices), 2):\n start_index = indices[i] + 11\n end_index = indices[i + 1]\n long_description += readme[start_index:end_index]\n return long_description\n\n\nextra_spark = [\"pyspark\"]\nextra_biology = [\"biopython\"]\nextra_chemistry = [\"rdkit\"]\nextra_engineering = [\"unyt\"]\nextra_all = extra_biology + extra_engineering + extra_spark\n\nsetup(\n name=\"pyjanitor\",\n version=\"0.18.2\",\n description=\"Tools for cleaning pandas DataFrames\",\n author=\"Eric J. Ma\",\n author_email=\"[email protected]\",\n url=\"https://github.com/ericmjl/pyjanitor\",\n packages=[\"janitor\"],\n install_requires=requirements(),\n extras_require={\n \"all\": extra_all,\n \"biology\": extra_biology,\n # \"chemistry\": extra_chemistry, should be inserted once rdkit\n # fixes https://github.com/rdkit/rdkit/issues/1812\n \"engineering\": extra_engineering,\n \"spark\": extra_spark,\n },\n python_requires=\">=3.6\",\n long_description=generate_long_description(),\n long_description_content_type=\"text/x-rst\",\n)\n", "path": "setup.py"}]}
| 965 | 263 |
gh_patches_debug_20555
|
rasdani/github-patches
|
git_diff
|
pytorch__vision-8124
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`VisionDataset` abstract class forces to set 'root' parameter, even if it is unused
### 🐛 Describe the bug
`TypeError: __init__() missing 1 required positional argument: 'root'`
when initializing VisionDataset without `root` param.
```python
from torchvision.transforms import ToTensor
from torchvision.datasets import VisionDataset
class ExtendedVisionDataset(VisionDataset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
transforms = ToTensor()
dataset = ExtendedVisionDataset(transforms=transforms)  # I don't really need the root param
```
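
One workaround today (a sketch relying only on the behaviour of the released class, not an official guarantee) is to pass a placeholder `root`, since the base class merely expands and stores it:

```python
# Workaround sketch: VisionDataset.__init__ only expands `root` when it is a
# str and then stores it, so a placeholder value avoids the TypeError.
from torchvision.datasets import VisionDataset
from torchvision.transforms import ToTensor

class ExtendedVisionDataset(VisionDataset):
    def __init__(self, **kwargs):
        super().__init__(root=None, **kwargs)  # placeholder root

dataset = ExtendedVisionDataset(transforms=ToTensor())
print(dataset.root)  # None
```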
### Versions
```
PyTorch version: 2.0.0
Is debug build: False
CUDA used to build PyTorch: 11.7
ROCM used to build PyTorch: N/A
OS: Ubuntu 18.04.3 LTS (x86_64)
GCC version: (Ubuntu 7.4.0-1ubuntu1~18.04.1) 7.4.0
Clang version: Could not collect
CMake version: version 3.10.2
Libc version: glibc-2.27
Python version: 3.9.18 (main, Sep 11 2023, 13:41:44) [GCC 11.2.0] (64-bit runtime)
Python platform: Linux-5.15.0-88-generic-x86_64-with-glibc2.27
Is CUDA available: True
CUDA runtime version: Could not collect
CUDA_MODULE_LOADING set to: LAZY
GPU models and configuration:
GPU 0: Tesla V100-DGXS-32GB
GPU 1: Tesla V100-DGXS-32GB
GPU 2: Tesla V100-DGXS-32GB
GPU 3: Tesla V100-DGXS-32GB
Nvidia driver version: 515.105.01
cuDNN version: /usr/lib/x86_64-linux-gnu/libcudnn.so.7.6.3
HIP runtime version: N/A
MIOpen runtime version: N/A
Is XNNPACK available: True
CPU:
Architecture: x86_64
CPU op-mode(s): 32-bit, 64-bit
Byte Order: Little Endian
CPU(s): 40
On-line CPU(s) list: 0-39
Thread(s) per core: 2
Core(s) per socket: 20
Socket(s): 1
NUMA node(s): 1
Vendor ID: GenuineIntel
CPU family: 6
Model: 79
Model name: Intel(R) Xeon(R) CPU E5-2698 v4 @ 2.20GHz
Stepping: 1
CPU MHz: 2468.528
CPU max MHz: 3600.0000
CPU min MHz: 1200.0000
BogoMIPS: 4397.69
L1d cache: 32K
L1i cache: 32K
L2 cache: 256K
L3 cache: 51200K
NUMA node0 CPU(s): 0-39
Flags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 ds_cpl smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb cat_l3 cdp_l3 invpcid_single pti intel_ppin ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a rdseed adx smap intel_pt xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts md_clear flush_l1d
Versions of relevant libraries:
[pip3] numpy==1.24.4
[pip3] torch==2.0.0
[pip3] torchmetrics==0.10.3
[pip3] torchvision==0.15.0
[pip3] triton==2.0.0
[conda] blas 1.0 mkl
[conda] mkl 2023.1.0 h213fc3f_46344
[conda] mkl-service 2.4.0 py39h5eee18b_1
[conda] mkl_fft 1.3.8 py39h5eee18b_0
[conda] mkl_random 1.2.4 py39hdb19cb5_0
[conda] numpy 1.24.4 pypi_0 pypi
[conda] pytorch 2.0.0 py3.9_cuda11.7_cudnn8.5.0_0 pytorch
[conda] pytorch-cuda 11.7 h778d358_5 pytorch
[conda] pytorch-mutex 1.0 cuda pytorch
[conda] torchmetrics 0.10.3 pyhd8ed1ab_0 conda-forge
[conda] torchtriton 2.0.0 py39 pytorch
[conda] torchvision 0.15.0 py39_cu117 pytorch```
cc @pmeier
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchvision/datasets/vision.py`
Content:
```
1 import os
2 from typing import Any, Callable, List, Optional, Tuple
3
4 import torch.utils.data as data
5
6 from ..utils import _log_api_usage_once
7
8
9 class VisionDataset(data.Dataset):
10 """
11 Base Class For making datasets which are compatible with torchvision.
12 It is necessary to override the ``__getitem__`` and ``__len__`` method.
13
14 Args:
15 root (string): Root directory of dataset.
16 transforms (callable, optional): A function/transforms that takes in
17 an image and a label and returns the transformed versions of both.
18 transform (callable, optional): A function/transform that takes in an PIL image
19 and returns a transformed version. E.g, ``transforms.RandomCrop``
20 target_transform (callable, optional): A function/transform that takes in the
21 target and transforms it.
22
23 .. note::
24
25 :attr:`transforms` and the combination of :attr:`transform` and :attr:`target_transform` are mutually exclusive.
26 """
27
28 _repr_indent = 4
29
30 def __init__(
31 self,
32 root: str,
33 transforms: Optional[Callable] = None,
34 transform: Optional[Callable] = None,
35 target_transform: Optional[Callable] = None,
36 ) -> None:
37 _log_api_usage_once(self)
38 if isinstance(root, str):
39 root = os.path.expanduser(root)
40 self.root = root
41
42 has_transforms = transforms is not None
43 has_separate_transform = transform is not None or target_transform is not None
44 if has_transforms and has_separate_transform:
45 raise ValueError("Only transforms or transform/target_transform can be passed as argument")
46
47 # for backwards-compatibility
48 self.transform = transform
49 self.target_transform = target_transform
50
51 if has_separate_transform:
52 transforms = StandardTransform(transform, target_transform)
53 self.transforms = transforms
54
55 def __getitem__(self, index: int) -> Any:
56 """
57 Args:
58 index (int): Index
59
60 Returns:
61 (Any): Sample and meta data, optionally transformed by the respective transforms.
62 """
63 raise NotImplementedError
64
65 def __len__(self) -> int:
66 raise NotImplementedError
67
68 def __repr__(self) -> str:
69 head = "Dataset " + self.__class__.__name__
70 body = [f"Number of datapoints: {self.__len__()}"]
71 if self.root is not None:
72 body.append(f"Root location: {self.root}")
73 body += self.extra_repr().splitlines()
74 if hasattr(self, "transforms") and self.transforms is not None:
75 body += [repr(self.transforms)]
76 lines = [head] + [" " * self._repr_indent + line for line in body]
77 return "\n".join(lines)
78
79 def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
80 lines = transform.__repr__().splitlines()
81 return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
82
83 def extra_repr(self) -> str:
84 return ""
85
86
87 class StandardTransform:
88 def __init__(self, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None) -> None:
89 self.transform = transform
90 self.target_transform = target_transform
91
92 def __call__(self, input: Any, target: Any) -> Tuple[Any, Any]:
93 if self.transform is not None:
94 input = self.transform(input)
95 if self.target_transform is not None:
96 target = self.target_transform(target)
97 return input, target
98
99 def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:
100 lines = transform.__repr__().splitlines()
101 return [f"{head}{lines[0]}"] + ["{}{}".format(" " * len(head), line) for line in lines[1:]]
102
103 def __repr__(self) -> str:
104 body = [self.__class__.__name__]
105 if self.transform is not None:
106 body += self._format_transform_repr(self.transform, "Transform: ")
107 if self.target_transform is not None:
108 body += self._format_transform_repr(self.target_transform, "Target transform: ")
109
110 return "\n".join(body)
111
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchvision/datasets/vision.py b/torchvision/datasets/vision.py
--- a/torchvision/datasets/vision.py
+++ b/torchvision/datasets/vision.py
@@ -12,7 +12,7 @@
It is necessary to override the ``__getitem__`` and ``__len__`` method.
Args:
- root (string): Root directory of dataset.
+ root (string, optional): Root directory of dataset. Only used for `__repr__`.
transforms (callable, optional): A function/transforms that takes in
an image and a label and returns the transformed versions of both.
transform (callable, optional): A function/transform that takes in an PIL image
@@ -29,7 +29,7 @@
def __init__(
self,
- root: str,
+ root: Optional[str] = None,
transforms: Optional[Callable] = None,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
|
{"golden_diff": "diff --git a/torchvision/datasets/vision.py b/torchvision/datasets/vision.py\n--- a/torchvision/datasets/vision.py\n+++ b/torchvision/datasets/vision.py\n@@ -12,7 +12,7 @@\n It is necessary to override the ``__getitem__`` and ``__len__`` method.\n \n Args:\n- root (string): Root directory of dataset.\n+ root (string, optional): Root directory of dataset. Only used for `__repr__`.\n transforms (callable, optional): A function/transforms that takes in\n an image and a label and returns the transformed versions of both.\n transform (callable, optional): A function/transform that takes in an PIL image\n@@ -29,7 +29,7 @@\n \n def __init__(\n self,\n- root: str,\n+ root: Optional[str] = None,\n transforms: Optional[Callable] = None,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n", "issue": "`VisionDataset` abstract class forces to set 'root' parameter, even if it is unused\n### \ud83d\udc1b Describe the bug\n\n`TypeError: __init__() missing 1 required positional argument: 'root'`\r\n\r\nwhen initializing VisionDataset without `root` param.\r\n\r\n```python\r\n\r\nfrom torchvision.transforms import ToTensor\r\nfrom torchvision.datasets import VisionDataset\r\n\r\nclass ExtendedVisionDataset(VisionDataset):\r\n def __init__(self, **kwargs):\r\n super().__init__(**kwargs) \r\n\r\ntransforms = ToTensor()\r\ndataset =ExtendedVisionDataset(transforms =transforms) # I dont really need root param\r\n```\n\n### Versions\n\n```\r\nPyTorch version: 2.0.0\r\nIs debug build: False\r\nCUDA used to build PyTorch: 11.7\r\nROCM used to build PyTorch: N/A\r\n\r\nOS: Ubuntu 18.04.3 LTS (x86_64)\r\nGCC version: (Ubuntu 7.4.0-1ubuntu1~18.04.1) 7.4.0\r\nClang version: Could not collect\r\nCMake version: version 3.10.2\r\nLibc version: glibc-2.27\r\n\r\nPython version: 3.9.18 (main, Sep 11 2023, 13:41:44) [GCC 11.2.0] (64-bit runtime)\r\nPython platform: Linux-5.15.0-88-generic-x86_64-with-glibc2.27\r\nIs CUDA available: True\r\nCUDA runtime version: Could not collect\r\nCUDA_MODULE_LOADING set to: LAZY\r\nGPU models and configuration: \r\nGPU 0: Tesla V100-DGXS-32GB\r\nGPU 1: Tesla V100-DGXS-32GB\r\nGPU 2: Tesla V100-DGXS-32GB\r\nGPU 3: Tesla V100-DGXS-32GB\r\n\r\nNvidia driver version: 515.105.01\r\ncuDNN version: /usr/lib/x86_64-linux-gnu/libcudnn.so.7.6.3\r\nHIP runtime version: N/A\r\nMIOpen runtime version: N/A\r\nIs XNNPACK available: True\r\n\r\nCPU:\r\nArchitecture: x86_64\r\nCPU op-mode(s): 32-bit, 64-bit\r\nByte Order: Little Endian\r\nCPU(s): 40\r\nOn-line CPU(s) list: 0-39\r\nThread(s) per core: 2\r\nCore(s) per socket: 20\r\nSocket(s): 1\r\nNUMA node(s): 1\r\nVendor ID: GenuineIntel\r\nCPU family: 6\r\nModel: 79\r\nModel name: Intel(R) Xeon(R) CPU E5-2698 v4 @ 2.20GHz\r\nStepping: 1\r\nCPU MHz: 2468.528\r\nCPU max MHz: 3600.0000\r\nCPU min MHz: 1200.0000\r\nBogoMIPS: 4397.69\r\nL1d cache: 32K\r\nL1i cache: 32K\r\nL2 cache: 256K\r\nL3 cache: 51200K\r\nNUMA node0 CPU(s): 0-39\r\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush dts acpi mmx fxsr sse sse2 ss ht tm pbe syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon pebs bts rep_good nopl xtopology nonstop_tsc cpuid aperfmperf pni pclmulqdq dtes64 ds_cpl smx est tm2 ssse3 sdbg fma cx16 xtpr pdcm pcid dca sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand lahf_lm abm 3dnowprefetch cpuid_fault epb cat_l3 cdp_l3 invpcid_single pti intel_ppin ssbd ibrs ibpb stibp fsgsbase tsc_adjust bmi1 hle avx2 smep bmi2 erms invpcid rtm cqm rdt_a 
rdseed adx smap intel_pt xsaveopt cqm_llc cqm_occup_llc cqm_mbm_total cqm_mbm_local dtherm ida arat pln pts md_clear flush_l1d\r\n\r\nVersions of relevant libraries:\r\n[pip3] numpy==1.24.4\r\n[pip3] torch==2.0.0\r\n[pip3] torchmetrics==0.10.3\r\n[pip3] torchvision==0.15.0\r\n[pip3] triton==2.0.0\r\n[conda] blas 1.0 mkl \r\n[conda] mkl 2023.1.0 h213fc3f_46344 \r\n[conda] mkl-service 2.4.0 py39h5eee18b_1 \r\n[conda] mkl_fft 1.3.8 py39h5eee18b_0 \r\n[conda] mkl_random 1.2.4 py39hdb19cb5_0 \r\n[conda] numpy 1.24.4 pypi_0 pypi\r\n[conda] pytorch 2.0.0 py3.9_cuda11.7_cudnn8.5.0_0 pytorch\r\n[conda] pytorch-cuda 11.7 h778d358_5 pytorch\r\n[conda] pytorch-mutex 1.0 cuda pytorch\r\n[conda] torchmetrics 0.10.3 pyhd8ed1ab_0 conda-forge\r\n[conda] torchtriton 2.0.0 py39 pytorch\r\n[conda] torchvision 0.15.0 py39_cu117 pytorch```\n\ncc @pmeier\n", "before_files": [{"content": "import os\nfrom typing import Any, Callable, List, Optional, Tuple\n\nimport torch.utils.data as data\n\nfrom ..utils import _log_api_usage_once\n\n\nclass VisionDataset(data.Dataset):\n \"\"\"\n Base Class For making datasets which are compatible with torchvision.\n It is necessary to override the ``__getitem__`` and ``__len__`` method.\n\n Args:\n root (string): Root directory of dataset.\n transforms (callable, optional): A function/transforms that takes in\n an image and a label and returns the transformed versions of both.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n\n .. note::\n\n :attr:`transforms` and the combination of :attr:`transform` and :attr:`target_transform` are mutually exclusive.\n \"\"\"\n\n _repr_indent = 4\n\n def __init__(\n self,\n root: str,\n transforms: Optional[Callable] = None,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n ) -> None:\n _log_api_usage_once(self)\n if isinstance(root, str):\n root = os.path.expanduser(root)\n self.root = root\n\n has_transforms = transforms is not None\n has_separate_transform = transform is not None or target_transform is not None\n if has_transforms and has_separate_transform:\n raise ValueError(\"Only transforms or transform/target_transform can be passed as argument\")\n\n # for backwards-compatibility\n self.transform = transform\n self.target_transform = target_transform\n\n if has_separate_transform:\n transforms = StandardTransform(transform, target_transform)\n self.transforms = transforms\n\n def __getitem__(self, index: int) -> Any:\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n (Any): Sample and meta data, optionally transformed by the respective transforms.\n \"\"\"\n raise NotImplementedError\n\n def __len__(self) -> int:\n raise NotImplementedError\n\n def __repr__(self) -> str:\n head = \"Dataset \" + self.__class__.__name__\n body = [f\"Number of datapoints: {self.__len__()}\"]\n if self.root is not None:\n body.append(f\"Root location: {self.root}\")\n body += self.extra_repr().splitlines()\n if hasattr(self, \"transforms\") and self.transforms is not None:\n body += [repr(self.transforms)]\n lines = [head] + [\" \" * self._repr_indent + line for line in body]\n return \"\\n\".join(lines)\n\n def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:\n lines = transform.__repr__().splitlines()\n return [f\"{head}{lines[0]}\"] + [\"{}{}\".format(\" \" * len(head), line) for 
line in lines[1:]]\n\n def extra_repr(self) -> str:\n return \"\"\n\n\nclass StandardTransform:\n def __init__(self, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None) -> None:\n self.transform = transform\n self.target_transform = target_transform\n\n def __call__(self, input: Any, target: Any) -> Tuple[Any, Any]:\n if self.transform is not None:\n input = self.transform(input)\n if self.target_transform is not None:\n target = self.target_transform(target)\n return input, target\n\n def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:\n lines = transform.__repr__().splitlines()\n return [f\"{head}{lines[0]}\"] + [\"{}{}\".format(\" \" * len(head), line) for line in lines[1:]]\n\n def __repr__(self) -> str:\n body = [self.__class__.__name__]\n if self.transform is not None:\n body += self._format_transform_repr(self.transform, \"Transform: \")\n if self.target_transform is not None:\n body += self._format_transform_repr(self.target_transform, \"Target transform: \")\n\n return \"\\n\".join(body)\n", "path": "torchvision/datasets/vision.py"}], "after_files": [{"content": "import os\nfrom typing import Any, Callable, List, Optional, Tuple\n\nimport torch.utils.data as data\n\nfrom ..utils import _log_api_usage_once\n\n\nclass VisionDataset(data.Dataset):\n \"\"\"\n Base Class For making datasets which are compatible with torchvision.\n It is necessary to override the ``__getitem__`` and ``__len__`` method.\n\n Args:\n root (string, optional): Root directory of dataset. Only used for `__repr__`.\n transforms (callable, optional): A function/transforms that takes in\n an image and a label and returns the transformed versions of both.\n transform (callable, optional): A function/transform that takes in an PIL image\n and returns a transformed version. E.g, ``transforms.RandomCrop``\n target_transform (callable, optional): A function/transform that takes in the\n target and transforms it.\n\n .. 
note::\n\n :attr:`transforms` and the combination of :attr:`transform` and :attr:`target_transform` are mutually exclusive.\n \"\"\"\n\n _repr_indent = 4\n\n def __init__(\n self,\n root: Optional[str] = None,\n transforms: Optional[Callable] = None,\n transform: Optional[Callable] = None,\n target_transform: Optional[Callable] = None,\n ) -> None:\n _log_api_usage_once(self)\n if isinstance(root, str):\n root = os.path.expanduser(root)\n self.root = root\n\n has_transforms = transforms is not None\n has_separate_transform = transform is not None or target_transform is not None\n if has_transforms and has_separate_transform:\n raise ValueError(\"Only transforms or transform/target_transform can be passed as argument\")\n\n # for backwards-compatibility\n self.transform = transform\n self.target_transform = target_transform\n\n if has_separate_transform:\n transforms = StandardTransform(transform, target_transform)\n self.transforms = transforms\n\n def __getitem__(self, index: int) -> Any:\n \"\"\"\n Args:\n index (int): Index\n\n Returns:\n (Any): Sample and meta data, optionally transformed by the respective transforms.\n \"\"\"\n raise NotImplementedError\n\n def __len__(self) -> int:\n raise NotImplementedError\n\n def __repr__(self) -> str:\n head = \"Dataset \" + self.__class__.__name__\n body = [f\"Number of datapoints: {self.__len__()}\"]\n if self.root is not None:\n body.append(f\"Root location: {self.root}\")\n body += self.extra_repr().splitlines()\n if hasattr(self, \"transforms\") and self.transforms is not None:\n body += [repr(self.transforms)]\n lines = [head] + [\" \" * self._repr_indent + line for line in body]\n return \"\\n\".join(lines)\n\n def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:\n lines = transform.__repr__().splitlines()\n return [f\"{head}{lines[0]}\"] + [\"{}{}\".format(\" \" * len(head), line) for line in lines[1:]]\n\n def extra_repr(self) -> str:\n return \"\"\n\n\nclass StandardTransform:\n def __init__(self, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None) -> None:\n self.transform = transform\n self.target_transform = target_transform\n\n def __call__(self, input: Any, target: Any) -> Tuple[Any, Any]:\n if self.transform is not None:\n input = self.transform(input)\n if self.target_transform is not None:\n target = self.target_transform(target)\n return input, target\n\n def _format_transform_repr(self, transform: Callable, head: str) -> List[str]:\n lines = transform.__repr__().splitlines()\n return [f\"{head}{lines[0]}\"] + [\"{}{}\".format(\" \" * len(head), line) for line in lines[1:]]\n\n def __repr__(self) -> str:\n body = [self.__class__.__name__]\n if self.transform is not None:\n body += self._format_transform_repr(self.transform, \"Transform: \")\n if self.target_transform is not None:\n body += self._format_transform_repr(self.target_transform, \"Target transform: \")\n\n return \"\\n\".join(body)\n", "path": "torchvision/datasets/vision.py"}]}
| 2,844 | 228 |
gh_patches_debug_30083
|
rasdani/github-patches
|
git_diff
|
mathesar-foundation__mathesar-1188
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
The type inference algorithm should use `TEXT` rather than `VARCHAR`
## Reproduce
1. "New Table" > "Import Data" > "Copy and Paste Text"
1. Paste the following data and proceed to create and view the table.
```txt
first_name
Marge
Homer
Lisa
Bart
Maggie
```
1. From the `columns` API, expect the response for the `first_name` column to have `"type": "TEXT"`
1. Observe instead that the column is `VARCHAR` without a length set.
## Rationale
- I spoke with @kgodey about the Mathesar Text type today and she said that Mathesar should only be configuring either: `TEXT` columns or `VARCHAR` columns with a length specified. She may be able to elaborate on the thinking that went into this decision.
## Additional context
- In #1118, we are doing some work to bring the front end into alignment with the above expectations when the user manually configures the DB settings for the Text type.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `db/columns/operations/infer_types.py`
Content:
```
1 import logging
2
3 from sqlalchemy import VARCHAR, TEXT, Text
4 from sqlalchemy.exc import DatabaseError
5
6 from db.columns.exceptions import DagCycleError
7 from db.columns.operations.alter import alter_column_type
8 from db.tables.operations.select import get_oid_from_table, reflect_table
9 from db.types.operations.cast import get_supported_alter_column_types
10 from db.types import base
11
12
13 logger = logging.getLogger(__name__)
14
15 MAX_INFERENCE_DAG_DEPTH = 100
16
17 TYPE_INFERENCE_DAG = {
18 base.PostgresType.BOOLEAN.value: [],
19 base.MathesarCustomType.EMAIL.value: [],
20 base.PostgresType.INTERVAL.value: [],
21 base.PostgresType.NUMERIC.value: [
22 base.PostgresType.BOOLEAN.value,
23 ],
24 base.STRING: [
25 base.PostgresType.BOOLEAN.value,
26 base.PostgresType.DATE.value,
27 base.PostgresType.NUMERIC.value,
28 base.MathesarCustomType.MATHESAR_MONEY.value,
29 base.PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE.value,
30 base.PostgresType.TIMESTAMP_WITH_TIME_ZONE.value,
31 # We only infer to TIME_WITHOUT_TIME_ZONE as time zones don't make much sense
32 # without additional date information. See postgres documentation for further
33 # details: https://www.postgresql.org/docs/13/datatype-datetime.html
34 base.PostgresType.TIME_WITHOUT_TIME_ZONE.value,
35 base.PostgresType.INTERVAL.value,
36 base.MathesarCustomType.EMAIL.value,
37 base.MathesarCustomType.URI.value,
38 ],
39 }
40
41
42 def _get_reverse_type_map(engine):
43 supported_types = get_supported_alter_column_types(engine)
44 reverse_type_map = {v: k for k, v in supported_types.items()}
45 reverse_type_map.update(
46 {
47 Text: base.STRING,
48 TEXT: base.STRING,
49 VARCHAR: base.STRING,
50 }
51 )
52 return reverse_type_map
53
54
55 def infer_column_type(schema, table_name, column_name, engine, depth=0, type_inference_dag=TYPE_INFERENCE_DAG):
56 if depth > MAX_INFERENCE_DAG_DEPTH:
57 raise DagCycleError("The type_inference_dag likely has a cycle")
58 reverse_type_map = _get_reverse_type_map(engine)
59
60 table = reflect_table(table_name, schema, engine)
61 column_type = table.columns[column_name].type.__class__
62 column_type_str = reverse_type_map.get(column_type)
63
64 logger.debug(f"column_type_str: {column_type_str}")
65 table_oid = get_oid_from_table(table_name, schema, engine)
66 for type_str in type_inference_dag.get(column_type_str, []):
67 try:
68 with engine.begin() as conn:
69 alter_column_type(table_oid, column_name, engine, conn, type_str)
70 logger.info(f"Column {column_name} altered to type {type_str}")
71 column_type = infer_column_type(
72 schema,
73 table_name,
74 column_name,
75 engine,
76 depth=depth + 1,
77 type_inference_dag=type_inference_dag,
78 )
79 break
80 # It's expected we catch this error when the test to see whether
81 # a type is appropriate for a column fails.
82 except DatabaseError:
83 logger.info(
84 f"Cannot alter column {column_name} to type {type_str}"
85 )
86 return column_type
87
```
Path: `db/tables/operations/create.py`
Content:
```
1 from sqlalchemy import Column, String, Table, MetaData
2 from sqlalchemy.ext import compiler
3 from sqlalchemy.schema import DDLElement
4
5 from db.columns.utils import init_mathesar_table_column_list_with_defaults
6 from db.schemas.operations.create import create_schema
7
8
9 def create_mathesar_table(name, schema, columns, engine, metadata=None):
10 """
11 This method creates a Postgres table in the specified schema using the
12 given name and column list. It adds internal mathesar columns to the
13 table.
14 """
15 columns = init_mathesar_table_column_list_with_defaults(columns)
16 create_schema(schema, engine)
17 # We need this so that we can create multiple mathesar tables in the
18 # same MetaData, enabling them to reference each other in the
19 # SQLAlchemy context (e.g., for creating a ForeignKey relationship)
20 if metadata is None:
21 metadata = MetaData(bind=engine, schema=schema)
22 table = Table(
23 name,
24 metadata,
25 *columns,
26 schema=schema
27 )
28 table.create(engine)
29 return table
30
31
32 def create_string_column_table(name, schema, column_names, engine):
33 """
34 This method creates a Postgres table in the specified schema, with all
35 columns being String type.
36 """
37 columns_ = [Column(name=column_name, type_=String) for column_name in column_names]
38 table = create_mathesar_table(name, schema, columns_, engine)
39 return table
40
41
42 class CreateTableAs(DDLElement):
43 def __init__(self, name, selectable):
44 self.name = name
45 self.selectable = selectable
46
47
48 @compiler.compiles(CreateTableAs)
49 def compile_create_table_as(element, compiler, **_):
50 return "CREATE TABLE %s AS (%s)" % (
51 element.name,
52 compiler.sql_compiler.process(element.selectable, literal_binds=True),
53 )
54
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/db/columns/operations/infer_types.py b/db/columns/operations/infer_types.py
--- a/db/columns/operations/infer_types.py
+++ b/db/columns/operations/infer_types.py
@@ -21,7 +21,7 @@
base.PostgresType.NUMERIC.value: [
base.PostgresType.BOOLEAN.value,
],
- base.STRING: [
+ base.PostgresType.TEXT.value: [
base.PostgresType.BOOLEAN.value,
base.PostgresType.DATE.value,
base.PostgresType.NUMERIC.value,
@@ -44,9 +44,9 @@
reverse_type_map = {v: k for k, v in supported_types.items()}
reverse_type_map.update(
{
- Text: base.STRING,
- TEXT: base.STRING,
- VARCHAR: base.STRING,
+ Text: base.PostgresType.TEXT.value,
+ TEXT: base.PostgresType.TEXT.value,
+ VARCHAR: base.PostgresType.TEXT.value,
}
)
return reverse_type_map
diff --git a/db/tables/operations/create.py b/db/tables/operations/create.py
--- a/db/tables/operations/create.py
+++ b/db/tables/operations/create.py
@@ -1,4 +1,4 @@
-from sqlalchemy import Column, String, Table, MetaData
+from sqlalchemy import Column, TEXT, Table, MetaData
from sqlalchemy.ext import compiler
from sqlalchemy.schema import DDLElement
@@ -34,7 +34,7 @@
This method creates a Postgres table in the specified schema, with all
columns being String type.
"""
- columns_ = [Column(name=column_name, type_=String) for column_name in column_names]
+ columns_ = [Column(name=column_name, type_=TEXT) for column_name in column_names]
table = create_mathesar_table(name, schema, columns_, engine)
return table
|
{"golden_diff": "diff --git a/db/columns/operations/infer_types.py b/db/columns/operations/infer_types.py\n--- a/db/columns/operations/infer_types.py\n+++ b/db/columns/operations/infer_types.py\n@@ -21,7 +21,7 @@\n base.PostgresType.NUMERIC.value: [\n base.PostgresType.BOOLEAN.value,\n ],\n- base.STRING: [\n+ base.PostgresType.TEXT.value: [\n base.PostgresType.BOOLEAN.value,\n base.PostgresType.DATE.value,\n base.PostgresType.NUMERIC.value,\n@@ -44,9 +44,9 @@\n reverse_type_map = {v: k for k, v in supported_types.items()}\n reverse_type_map.update(\n {\n- Text: base.STRING,\n- TEXT: base.STRING,\n- VARCHAR: base.STRING,\n+ Text: base.PostgresType.TEXT.value,\n+ TEXT: base.PostgresType.TEXT.value,\n+ VARCHAR: base.PostgresType.TEXT.value,\n }\n )\n return reverse_type_map\ndiff --git a/db/tables/operations/create.py b/db/tables/operations/create.py\n--- a/db/tables/operations/create.py\n+++ b/db/tables/operations/create.py\n@@ -1,4 +1,4 @@\n-from sqlalchemy import Column, String, Table, MetaData\n+from sqlalchemy import Column, TEXT, Table, MetaData\n from sqlalchemy.ext import compiler\n from sqlalchemy.schema import DDLElement\n \n@@ -34,7 +34,7 @@\n This method creates a Postgres table in the specified schema, with all\n columns being String type.\n \"\"\"\n- columns_ = [Column(name=column_name, type_=String) for column_name in column_names]\n+ columns_ = [Column(name=column_name, type_=TEXT) for column_name in column_names]\n table = create_mathesar_table(name, schema, columns_, engine)\n return table\n", "issue": "The type inference algorithm should use `TEXT` rather than `VARCHAR`\n## Reproduce\r\n\r\n1. \"New Table\" > \"Import Data\" > \"Copy and Paste Text\"\r\n\r\n1. Paste the following data and proceed to create and view the table.\r\n\r\n ```txt\r\n first_name\r\n Marge\r\n Homer\r\n Lisa\r\n Bart\r\n Maggie\r\n ```\r\n\r\n1. From the `columns` API, expect the response for the `first_name` column to have `\"type\": \"TEXT\"`\r\n\r\n1. Observe instead that the column is `VARCHAR` without a length set.\r\n\r\n## Rationale\r\n\r\n- I spoke with @kgodey about the Mathesar Text type today and she say that Mathesar should only be configuring either: `TEXT` columns or `VARCHAR` columns with a length specified. 
She may be able to elaborate on the thinking that went into this decision.\r\n\r\n\r\n## Additional context\r\n\r\n- In #1118, we are doing some work to bring the front end into alignment with the above expectations when the user manually configures the DB settings for the Text type.\r\n\r\n\n", "before_files": [{"content": "import logging\n\nfrom sqlalchemy import VARCHAR, TEXT, Text\nfrom sqlalchemy.exc import DatabaseError\n\nfrom db.columns.exceptions import DagCycleError\nfrom db.columns.operations.alter import alter_column_type\nfrom db.tables.operations.select import get_oid_from_table, reflect_table\nfrom db.types.operations.cast import get_supported_alter_column_types\nfrom db.types import base\n\n\nlogger = logging.getLogger(__name__)\n\nMAX_INFERENCE_DAG_DEPTH = 100\n\nTYPE_INFERENCE_DAG = {\n base.PostgresType.BOOLEAN.value: [],\n base.MathesarCustomType.EMAIL.value: [],\n base.PostgresType.INTERVAL.value: [],\n base.PostgresType.NUMERIC.value: [\n base.PostgresType.BOOLEAN.value,\n ],\n base.STRING: [\n base.PostgresType.BOOLEAN.value,\n base.PostgresType.DATE.value,\n base.PostgresType.NUMERIC.value,\n base.MathesarCustomType.MATHESAR_MONEY.value,\n base.PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE.value,\n base.PostgresType.TIMESTAMP_WITH_TIME_ZONE.value,\n # We only infer to TIME_WITHOUT_TIME_ZONE as time zones don't make much sense\n # without additional date information. See postgres documentation for further\n # details: https://www.postgresql.org/docs/13/datatype-datetime.html\n base.PostgresType.TIME_WITHOUT_TIME_ZONE.value,\n base.PostgresType.INTERVAL.value,\n base.MathesarCustomType.EMAIL.value,\n base.MathesarCustomType.URI.value,\n ],\n}\n\n\ndef _get_reverse_type_map(engine):\n supported_types = get_supported_alter_column_types(engine)\n reverse_type_map = {v: k for k, v in supported_types.items()}\n reverse_type_map.update(\n {\n Text: base.STRING,\n TEXT: base.STRING,\n VARCHAR: base.STRING,\n }\n )\n return reverse_type_map\n\n\ndef infer_column_type(schema, table_name, column_name, engine, depth=0, type_inference_dag=TYPE_INFERENCE_DAG):\n if depth > MAX_INFERENCE_DAG_DEPTH:\n raise DagCycleError(\"The type_inference_dag likely has a cycle\")\n reverse_type_map = _get_reverse_type_map(engine)\n\n table = reflect_table(table_name, schema, engine)\n column_type = table.columns[column_name].type.__class__\n column_type_str = reverse_type_map.get(column_type)\n\n logger.debug(f\"column_type_str: {column_type_str}\")\n table_oid = get_oid_from_table(table_name, schema, engine)\n for type_str in type_inference_dag.get(column_type_str, []):\n try:\n with engine.begin() as conn:\n alter_column_type(table_oid, column_name, engine, conn, type_str)\n logger.info(f\"Column {column_name} altered to type {type_str}\")\n column_type = infer_column_type(\n schema,\n table_name,\n column_name,\n engine,\n depth=depth + 1,\n type_inference_dag=type_inference_dag,\n )\n break\n # It's expected we catch this error when the test to see whether\n # a type is appropriate for a column fails.\n except DatabaseError:\n logger.info(\n f\"Cannot alter column {column_name} to type {type_str}\"\n )\n return column_type\n", "path": "db/columns/operations/infer_types.py"}, {"content": "from sqlalchemy import Column, String, Table, MetaData\nfrom sqlalchemy.ext import compiler\nfrom sqlalchemy.schema import DDLElement\n\nfrom db.columns.utils import init_mathesar_table_column_list_with_defaults\nfrom db.schemas.operations.create import create_schema\n\n\ndef create_mathesar_table(name, schema, 
columns, engine, metadata=None):\n \"\"\"\n This method creates a Postgres table in the specified schema using the\n given name and column list. It adds internal mathesar columns to the\n table.\n \"\"\"\n columns = init_mathesar_table_column_list_with_defaults(columns)\n create_schema(schema, engine)\n # We need this so that we can create multiple mathesar tables in the\n # same MetaData, enabling them to reference each other in the\n # SQLAlchemy context (e.g., for creating a ForeignKey relationship)\n if metadata is None:\n metadata = MetaData(bind=engine, schema=schema)\n table = Table(\n name,\n metadata,\n *columns,\n schema=schema\n )\n table.create(engine)\n return table\n\n\ndef create_string_column_table(name, schema, column_names, engine):\n \"\"\"\n This method creates a Postgres table in the specified schema, with all\n columns being String type.\n \"\"\"\n columns_ = [Column(name=column_name, type_=String) for column_name in column_names]\n table = create_mathesar_table(name, schema, columns_, engine)\n return table\n\n\nclass CreateTableAs(DDLElement):\n def __init__(self, name, selectable):\n self.name = name\n self.selectable = selectable\n\n\[email protected](CreateTableAs)\ndef compile_create_table_as(element, compiler, **_):\n return \"CREATE TABLE %s AS (%s)\" % (\n element.name,\n compiler.sql_compiler.process(element.selectable, literal_binds=True),\n )\n", "path": "db/tables/operations/create.py"}], "after_files": [{"content": "import logging\n\nfrom sqlalchemy import VARCHAR, TEXT, Text\nfrom sqlalchemy.exc import DatabaseError\n\nfrom db.columns.exceptions import DagCycleError\nfrom db.columns.operations.alter import alter_column_type\nfrom db.tables.operations.select import get_oid_from_table, reflect_table\nfrom db.types.operations.cast import get_supported_alter_column_types\nfrom db.types import base\n\n\nlogger = logging.getLogger(__name__)\n\nMAX_INFERENCE_DAG_DEPTH = 100\n\nTYPE_INFERENCE_DAG = {\n base.PostgresType.BOOLEAN.value: [],\n base.MathesarCustomType.EMAIL.value: [],\n base.PostgresType.INTERVAL.value: [],\n base.PostgresType.NUMERIC.value: [\n base.PostgresType.BOOLEAN.value,\n ],\n base.PostgresType.TEXT.value: [\n base.PostgresType.BOOLEAN.value,\n base.PostgresType.DATE.value,\n base.PostgresType.NUMERIC.value,\n base.MathesarCustomType.MATHESAR_MONEY.value,\n base.PostgresType.TIMESTAMP_WITHOUT_TIME_ZONE.value,\n base.PostgresType.TIMESTAMP_WITH_TIME_ZONE.value,\n # We only infer to TIME_WITHOUT_TIME_ZONE as time zones don't make much sense\n # without additional date information. 
See postgres documentation for further\n # details: https://www.postgresql.org/docs/13/datatype-datetime.html\n base.PostgresType.TIME_WITHOUT_TIME_ZONE.value,\n base.PostgresType.INTERVAL.value,\n base.MathesarCustomType.EMAIL.value,\n base.MathesarCustomType.URI.value,\n ],\n}\n\n\ndef _get_reverse_type_map(engine):\n supported_types = get_supported_alter_column_types(engine)\n reverse_type_map = {v: k for k, v in supported_types.items()}\n reverse_type_map.update(\n {\n Text: base.PostgresType.TEXT.value,\n TEXT: base.PostgresType.TEXT.value,\n VARCHAR: base.PostgresType.TEXT.value,\n }\n )\n return reverse_type_map\n\n\ndef infer_column_type(schema, table_name, column_name, engine, depth=0, type_inference_dag=TYPE_INFERENCE_DAG):\n if depth > MAX_INFERENCE_DAG_DEPTH:\n raise DagCycleError(\"The type_inference_dag likely has a cycle\")\n reverse_type_map = _get_reverse_type_map(engine)\n\n table = reflect_table(table_name, schema, engine)\n column_type = table.columns[column_name].type.__class__\n column_type_str = reverse_type_map.get(column_type)\n\n logger.debug(f\"column_type_str: {column_type_str}\")\n table_oid = get_oid_from_table(table_name, schema, engine)\n for type_str in type_inference_dag.get(column_type_str, []):\n try:\n with engine.begin() as conn:\n alter_column_type(table_oid, column_name, engine, conn, type_str)\n logger.info(f\"Column {column_name} altered to type {type_str}\")\n column_type = infer_column_type(\n schema,\n table_name,\n column_name,\n engine,\n depth=depth + 1,\n type_inference_dag=type_inference_dag,\n )\n break\n # It's expected we catch this error when the test to see whether\n # a type is appropriate for a column fails.\n except DatabaseError:\n logger.info(\n f\"Cannot alter column {column_name} to type {type_str}\"\n )\n return column_type\n", "path": "db/columns/operations/infer_types.py"}, {"content": "from sqlalchemy import Column, TEXT, Table, MetaData\nfrom sqlalchemy.ext import compiler\nfrom sqlalchemy.schema import DDLElement\n\nfrom db.columns.utils import init_mathesar_table_column_list_with_defaults\nfrom db.schemas.operations.create import create_schema\n\n\ndef create_mathesar_table(name, schema, columns, engine, metadata=None):\n \"\"\"\n This method creates a Postgres table in the specified schema using the\n given name and column list. 
It adds internal mathesar columns to the\n table.\n \"\"\"\n columns = init_mathesar_table_column_list_with_defaults(columns)\n create_schema(schema, engine)\n # We need this so that we can create multiple mathesar tables in the\n # same MetaData, enabling them to reference each other in the\n # SQLAlchemy context (e.g., for creating a ForeignKey relationship)\n if metadata is None:\n metadata = MetaData(bind=engine, schema=schema)\n table = Table(\n name,\n metadata,\n *columns,\n schema=schema\n )\n table.create(engine)\n return table\n\n\ndef create_string_column_table(name, schema, column_names, engine):\n \"\"\"\n This method creates a Postgres table in the specified schema, with all\n columns being String type.\n \"\"\"\n columns_ = [Column(name=column_name, type_=TEXT) for column_name in column_names]\n table = create_mathesar_table(name, schema, columns_, engine)\n return table\n\n\nclass CreateTableAs(DDLElement):\n def __init__(self, name, selectable):\n self.name = name\n self.selectable = selectable\n\n\[email protected](CreateTableAs)\ndef compile_create_table_as(element, compiler, **_):\n return \"CREATE TABLE %s AS (%s)\" % (\n element.name,\n compiler.sql_compiler.process(element.selectable, literal_binds=True),\n )\n", "path": "db/tables/operations/create.py"}]}
| 1,874 | 415 |
gh_patches_debug_36545
|
rasdani/github-patches
|
git_diff
|
translate__pootle-6680
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Try simpler language code as fallback before settings.LANGUAGE_CODE
In https://github.com/translate/pootle/blob/10913224/pootle/i18n/override.py#L87-L101, if the language code `it-IT` (for example) is tried and not supported, it eventually falls back to `settings.LANGUAGE_CODE`; it makes sense to first try `it` (the simpler version of `it-IT`) before falling back to `settings.LANGUAGE_CODE`.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pootle/i18n/override.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 #
3 # Copyright (C) Pootle contributors.
4 #
5 # This file is a part of the Pootle project. It is distributed under the GPL3
6 # or later license. See the LICENSE file for a copy of the license and the
7 # AUTHORS file for copyright and authorship information.
8
9 """Overrides and support functions for arbitrary locale support."""
10
11 import os
12
13 from translate.lang import data
14
15 from django.utils import translation
16 from django.utils.translation import LANGUAGE_SESSION_KEY, trans_real
17
18 from pootle.i18n import gettext
19
20
21 def find_languages(locale_path):
22 """Generate supported languages list from the :param:`locale_path`
23 directory.
24 """
25 dirs = os.listdir(locale_path)
26 langs = []
27 for lang in dirs:
28 if (data.langcode_re.match(lang) and
29 os.path.isdir(os.path.join(locale_path, lang))):
30 langs.append((trans_real.to_language(lang),
31 data.languages.get(lang, (lang,))[0]))
32 return langs
33
34
35 def supported_langs():
36 """Returns a list of supported locales."""
37 from django.conf import settings
38 return settings.LANGUAGES
39
40
41 def get_lang_from_session(request, supported):
42 if hasattr(request, 'session'):
43 lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)
44 if lang_code and lang_code in supported:
45 return lang_code
46
47 return None
48
49
50 def get_lang_from_cookie(request, supported):
51 """See if the user's browser sent a cookie with a preferred language."""
52 from django.conf import settings
53 lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
54
55 if lang_code and lang_code in supported:
56 return lang_code
57
58 return None
59
60
61 def get_lang_from_http_header(request, supported):
62 """If the user's browser sends a list of preferred languages in the
63 HTTP_ACCEPT_LANGUAGE header, parse it into a list. Then walk through
64 the list, and for each entry, we check whether we have a matching
65 pootle translation project. If so, we return it.
66
67 If nothing is found, return None.
68 """
69 accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
70 for accept_lang, __ in trans_real.parse_accept_lang_header(accept):
71 if accept_lang == '*':
72 return None
73
74 normalized = data.normalize_code(data.simplify_to_common(accept_lang))
75 if normalized in ['en-us', 'en']:
76 return None
77 if normalized in supported:
78 return normalized
79
80 # FIXME: horribly slow way of dealing with languages with @ in them
81 for lang in supported.keys():
82 if normalized == data.normalize_code(lang):
83 return lang
84 return None
85
86
87 def get_language_from_request(request, check_path=False):
88 """Try to get the user's preferred language by first checking the
89 cookie and then by checking the HTTP language headers.
90
91 If all fails, try fall back to default language.
92 """
93 supported = dict(supported_langs())
94 for lang_getter in (get_lang_from_session,
95 get_lang_from_cookie,
96 get_lang_from_http_header):
97 lang = lang_getter(request, supported)
98 if lang is not None:
99 return lang
100 from django.conf import settings
101 return settings.LANGUAGE_CODE
102
103
104 def get_language_bidi():
105 """Override for Django's get_language_bidi that's aware of more RTL
106 languages.
107 """
108 return gettext.language_dir(translation.get_language()) == 'rtl'
109
110
111 def hijack_translation():
112 """Sabotage Django's fascist linguistical regime."""
113 # Override functions that check if language is known to Django
114 translation.check_for_language = lambda lang_code: True
115 trans_real.check_for_language = lambda lang_code: True
116 translation.get_language_from_request = get_language_from_request
117
118 # Override django's inadequate bidi detection
119 translation.get_language_bidi = get_language_bidi
120
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pootle/i18n/override.py b/pootle/i18n/override.py
--- a/pootle/i18n/override.py
+++ b/pootle/i18n/override.py
@@ -38,24 +38,35 @@
return settings.LANGUAGES
-def get_lang_from_session(request, supported):
- if hasattr(request, 'session'):
- lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)
- if lang_code and lang_code in supported:
- return lang_code
+def get_language_supported(lang_code, supported):
+ normalized = data.normalize_code(data.simplify_to_common(lang_code))
+ if normalized in supported:
+ return normalized
+
+ # FIXME: horribly slow way of dealing with languages with @ in them
+ for lang in supported.keys():
+ if normalized == data.normalize_code(lang):
+ return lang
return None
+def get_lang_from_session(request, supported):
+ if not hasattr(request, 'session'):
+ return None
+ lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)
+ if not lang_code:
+ return None
+ return get_language_supported(lang_code, supported)
+
+
def get_lang_from_cookie(request, supported):
"""See if the user's browser sent a cookie with a preferred language."""
from django.conf import settings
lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
-
- if lang_code and lang_code in supported:
- return lang_code
-
- return None
+ if not lang_code:
+ return None
+ return get_language_supported(lang_code, supported)
def get_lang_from_http_header(request, supported):
@@ -70,17 +81,9 @@
for accept_lang, __ in trans_real.parse_accept_lang_header(accept):
if accept_lang == '*':
return None
-
- normalized = data.normalize_code(data.simplify_to_common(accept_lang))
- if normalized in ['en-us', 'en']:
- return None
- if normalized in supported:
- return normalized
-
- # FIXME: horribly slow way of dealing with languages with @ in them
- for lang in supported.keys():
- if normalized == data.normalize_code(lang):
- return lang
+ supported_lang = get_language_supported(accept_lang, supported)
+ if supported_lang:
+ return supported_lang
return None
@@ -98,7 +101,9 @@
if lang is not None:
return lang
from django.conf import settings
- return settings.LANGUAGE_CODE
+ if settings.LANGUAGE_CODE in supported:
+ return settings.LANGUAGE_CODE
+ return 'en-us'
def get_language_bidi():
|
{"golden_diff": "diff --git a/pootle/i18n/override.py b/pootle/i18n/override.py\n--- a/pootle/i18n/override.py\n+++ b/pootle/i18n/override.py\n@@ -38,24 +38,35 @@\n return settings.LANGUAGES\n \n \n-def get_lang_from_session(request, supported):\n- if hasattr(request, 'session'):\n- lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)\n- if lang_code and lang_code in supported:\n- return lang_code\n+def get_language_supported(lang_code, supported):\n+ normalized = data.normalize_code(data.simplify_to_common(lang_code))\n+ if normalized in supported:\n+ return normalized\n+\n+ # FIXME: horribly slow way of dealing with languages with @ in them\n+ for lang in supported.keys():\n+ if normalized == data.normalize_code(lang):\n+ return lang\n \n return None\n \n \n+def get_lang_from_session(request, supported):\n+ if not hasattr(request, 'session'):\n+ return None\n+ lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)\n+ if not lang_code:\n+ return None\n+ return get_language_supported(lang_code, supported)\n+\n+\n def get_lang_from_cookie(request, supported):\n \"\"\"See if the user's browser sent a cookie with a preferred language.\"\"\"\n from django.conf import settings\n lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)\n-\n- if lang_code and lang_code in supported:\n- return lang_code\n-\n- return None\n+ if not lang_code:\n+ return None\n+ return get_language_supported(lang_code, supported)\n \n \n def get_lang_from_http_header(request, supported):\n@@ -70,17 +81,9 @@\n for accept_lang, __ in trans_real.parse_accept_lang_header(accept):\n if accept_lang == '*':\n return None\n-\n- normalized = data.normalize_code(data.simplify_to_common(accept_lang))\n- if normalized in ['en-us', 'en']:\n- return None\n- if normalized in supported:\n- return normalized\n-\n- # FIXME: horribly slow way of dealing with languages with @ in them\n- for lang in supported.keys():\n- if normalized == data.normalize_code(lang):\n- return lang\n+ supported_lang = get_language_supported(accept_lang, supported)\n+ if supported_lang:\n+ return supported_lang\n return None\n \n \n@@ -98,7 +101,9 @@\n if lang is not None:\n return lang\n from django.conf import settings\n- return settings.LANGUAGE_CODE\n+ if settings.LANGUAGE_CODE in supported:\n+ return settings.LANGUAGE_CODE\n+ return 'en-us'\n \n \n def get_language_bidi():\n", "issue": "Try simpler language code as fallback before settings.LANGUAGE_CODE\nIn https://github.com/translate/pootle/blob/10913224/pootle/i18n/override.py#L87-L101 if the language code `it-IT` (for example) is tried and eventually falls back to `settings.LANGUAGE_CODE`, but it makes sense to first try `it` (simpler version of `it-IT`) before falling back to `settings.LANGUAGE_CODE`.\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. 
See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n\"\"\"Overrides and support functions for arbitrary locale support.\"\"\"\n\nimport os\n\nfrom translate.lang import data\n\nfrom django.utils import translation\nfrom django.utils.translation import LANGUAGE_SESSION_KEY, trans_real\n\nfrom pootle.i18n import gettext\n\n\ndef find_languages(locale_path):\n \"\"\"Generate supported languages list from the :param:`locale_path`\n directory.\n \"\"\"\n dirs = os.listdir(locale_path)\n langs = []\n for lang in dirs:\n if (data.langcode_re.match(lang) and\n os.path.isdir(os.path.join(locale_path, lang))):\n langs.append((trans_real.to_language(lang),\n data.languages.get(lang, (lang,))[0]))\n return langs\n\n\ndef supported_langs():\n \"\"\"Returns a list of supported locales.\"\"\"\n from django.conf import settings\n return settings.LANGUAGES\n\n\ndef get_lang_from_session(request, supported):\n if hasattr(request, 'session'):\n lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)\n if lang_code and lang_code in supported:\n return lang_code\n\n return None\n\n\ndef get_lang_from_cookie(request, supported):\n \"\"\"See if the user's browser sent a cookie with a preferred language.\"\"\"\n from django.conf import settings\n lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)\n\n if lang_code and lang_code in supported:\n return lang_code\n\n return None\n\n\ndef get_lang_from_http_header(request, supported):\n \"\"\"If the user's browser sends a list of preferred languages in the\n HTTP_ACCEPT_LANGUAGE header, parse it into a list. Then walk through\n the list, and for each entry, we check whether we have a matching\n pootle translation project. If so, we return it.\n\n If nothing is found, return None.\n \"\"\"\n accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')\n for accept_lang, __ in trans_real.parse_accept_lang_header(accept):\n if accept_lang == '*':\n return None\n\n normalized = data.normalize_code(data.simplify_to_common(accept_lang))\n if normalized in ['en-us', 'en']:\n return None\n if normalized in supported:\n return normalized\n\n # FIXME: horribly slow way of dealing with languages with @ in them\n for lang in supported.keys():\n if normalized == data.normalize_code(lang):\n return lang\n return None\n\n\ndef get_language_from_request(request, check_path=False):\n \"\"\"Try to get the user's preferred language by first checking the\n cookie and then by checking the HTTP language headers.\n\n If all fails, try fall back to default language.\n \"\"\"\n supported = dict(supported_langs())\n for lang_getter in (get_lang_from_session,\n get_lang_from_cookie,\n get_lang_from_http_header):\n lang = lang_getter(request, supported)\n if lang is not None:\n return lang\n from django.conf import settings\n return settings.LANGUAGE_CODE\n\n\ndef get_language_bidi():\n \"\"\"Override for Django's get_language_bidi that's aware of more RTL\n languages.\n \"\"\"\n return gettext.language_dir(translation.get_language()) == 'rtl'\n\n\ndef hijack_translation():\n \"\"\"Sabotage Django's fascist linguistical regime.\"\"\"\n # Override functions that check if language is known to Django\n translation.check_for_language = lambda lang_code: True\n trans_real.check_for_language = lambda lang_code: True\n translation.get_language_from_request = get_language_from_request\n\n # Override django's inadequate bidi detection\n translation.get_language_bidi = get_language_bidi\n", "path": "pootle/i18n/override.py"}], 
"after_files": [{"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\n\"\"\"Overrides and support functions for arbitrary locale support.\"\"\"\n\nimport os\n\nfrom translate.lang import data\n\nfrom django.utils import translation\nfrom django.utils.translation import LANGUAGE_SESSION_KEY, trans_real\n\nfrom pootle.i18n import gettext\n\n\ndef find_languages(locale_path):\n \"\"\"Generate supported languages list from the :param:`locale_path`\n directory.\n \"\"\"\n dirs = os.listdir(locale_path)\n langs = []\n for lang in dirs:\n if (data.langcode_re.match(lang) and\n os.path.isdir(os.path.join(locale_path, lang))):\n langs.append((trans_real.to_language(lang),\n data.languages.get(lang, (lang,))[0]))\n return langs\n\n\ndef supported_langs():\n \"\"\"Returns a list of supported locales.\"\"\"\n from django.conf import settings\n return settings.LANGUAGES\n\n\ndef get_language_supported(lang_code, supported):\n normalized = data.normalize_code(data.simplify_to_common(lang_code))\n if normalized in supported:\n return normalized\n\n # FIXME: horribly slow way of dealing with languages with @ in them\n for lang in supported.keys():\n if normalized == data.normalize_code(lang):\n return lang\n\n return None\n\n\ndef get_lang_from_session(request, supported):\n if not hasattr(request, 'session'):\n return None\n lang_code = request.session.get(LANGUAGE_SESSION_KEY, None)\n if not lang_code:\n return None\n return get_language_supported(lang_code, supported)\n\n\ndef get_lang_from_cookie(request, supported):\n \"\"\"See if the user's browser sent a cookie with a preferred language.\"\"\"\n from django.conf import settings\n lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)\n if not lang_code:\n return None\n return get_language_supported(lang_code, supported)\n\n\ndef get_lang_from_http_header(request, supported):\n \"\"\"If the user's browser sends a list of preferred languages in the\n HTTP_ACCEPT_LANGUAGE header, parse it into a list. Then walk through\n the list, and for each entry, we check whether we have a matching\n pootle translation project. 
If so, we return it.\n\n If nothing is found, return None.\n \"\"\"\n accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')\n for accept_lang, __ in trans_real.parse_accept_lang_header(accept):\n if accept_lang == '*':\n return None\n supported_lang = get_language_supported(accept_lang, supported)\n if supported_lang:\n return supported_lang\n return None\n\n\ndef get_language_from_request(request, check_path=False):\n \"\"\"Try to get the user's preferred language by first checking the\n cookie and then by checking the HTTP language headers.\n\n If all fails, try fall back to default language.\n \"\"\"\n supported = dict(supported_langs())\n for lang_getter in (get_lang_from_session,\n get_lang_from_cookie,\n get_lang_from_http_header):\n lang = lang_getter(request, supported)\n if lang is not None:\n return lang\n from django.conf import settings\n if settings.LANGUAGE_CODE in supported:\n return settings.LANGUAGE_CODE\n return 'en-us'\n\n\ndef get_language_bidi():\n \"\"\"Override for Django's get_language_bidi that's aware of more RTL\n languages.\n \"\"\"\n return gettext.language_dir(translation.get_language()) == 'rtl'\n\n\ndef hijack_translation():\n \"\"\"Sabotage Django's fascist linguistical regime.\"\"\"\n # Override functions that check if language is known to Django\n translation.check_for_language = lambda lang_code: True\n trans_real.check_for_language = lambda lang_code: True\n translation.get_language_from_request = get_language_from_request\n\n # Override django's inadequate bidi detection\n translation.get_language_bidi = get_language_bidi\n", "path": "pootle/i18n/override.py"}]}
| 1,461 | 607 |
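Aside on the record that closes above (`pootle/i18n/override.py`): the patched module resolves the user's language by trying a session getter, a cookie getter and an Accept-Language getter in order, then falling back to `settings.LANGUAGE_CODE` or `'en-us'`. The sketch below reproduces only that fallback-chain pattern in isolation; `FakeRequest`, the `"django_language"` key and the hard-coded `SUPPORTED` dict are illustrative assumptions, not Pootle or Django APIs.

```python
# Illustrative sketch of the session -> cookie -> header fallback chain used by
# get_language_from_request in the record above. FakeRequest, the
# "django_language" key and SUPPORTED are assumptions for this demo only.
SUPPORTED = {"en-us": "English", "fi": "Finnish", "pt-br": "Brazilian Portuguese"}


class FakeRequest:
    def __init__(self, session=None, cookies=None, accept=""):
        self.session = session or {}
        self.COOKIES = cookies or {}
        self.META = {"HTTP_ACCEPT_LANGUAGE": accept}


def from_session(request, supported):
    lang = request.session.get("django_language")
    return lang if lang in supported else None


def from_cookie(request, supported):
    lang = request.COOKIES.get("django_language")
    return lang if lang in supported else None


def from_header(request, supported):
    # Walk the Accept-Language entries in order and pick the first supported one.
    for chunk in request.META.get("HTTP_ACCEPT_LANGUAGE", "").split(","):
        lang = chunk.split(";")[0].strip().lower()
        if lang in supported:
            return lang
    return None


def negotiate(request, supported=SUPPORTED, default="en-us"):
    for getter in (from_session, from_cookie, from_header):
        lang = getter(request, supported)
        if lang is not None:
            return lang
    return default


print(negotiate(FakeRequest(accept="fi;q=0.9, en-us;q=0.8")))  # -> fi
```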
gh_patches_debug_7348
|
rasdani/github-patches
|
git_diff
|
Kinto__kinto-1131
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Memory backend sometimes show empty permissions
If you do multiple requests on memory backend, the empty permissions cycle between showing and not showing. The same does not happen with postgres.
```json
gsurita-30820:kinto gsurita$ echo '{"permissions": {"read": []}}' | http put localhost:8888/v1/buckets/b1 -a a:a
{
"data": {
"id": "b1",
"last_modified": 1485553456205
},
"permissions": {
"write": [
"basicauth:80866b4d0726f35eda20b90bc479a38727c99c68d7c88a87f3b860726a79daeb"
]
}
}
gsurita-30820:kinto gsurita$ echo '{"permissions": {"read": []}}' | http put localhost:8888/v1/buckets/b1 -a a:a
{
"data": {
"id": "b1",
"last_modified": 1485553470501
},
"permissions": {
"collection:create": [],
"group:create": [],
"read": [],
"write": [
"basicauth:80866b4d0726f35eda20b90bc479a38727c99c68d7c88a87f3b860726a79daeb"
]
}
}
gsurita-30820:kinto gsurita$ echo '{"permissions": {"read": []}}' | http put localhost:8888/v1/buckets/b1 -a a:a
{
"data": {
"id": "b1",
"last_modified": 1485553471419
},
"permissions": {
"write": [
"basicauth:80866b4d0726f35eda20b90bc479a38727c99c68d7c88a87f3b860726a79daeb"
]
}
}
gsurita-30820:kinto gsurita$ echo '{"permissions": {"read": []}}' | http put localhost:8888/v1/buckets/b1 -a a:a
{
"data": {
"id": "b1",
"last_modified": 1485553472203
},
"permissions": {
"collection:create": [],
"group:create": [],
"read": [],
"write": [
"basicauth:80866b4d0726f35eda20b90bc479a38727c99c68d7c88a87f3b860726a79daeb"
]
}
}
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kinto/core/permission/memory.py`
Content:
```
1 import re
2
3 from kinto.core.decorators import synchronized
4 from kinto.core.permission import PermissionBase
5
6
7 class Permission(PermissionBase):
8 """Permission backend implementation in local process memory.
9
10 Enable in configuration::
11
12 kinto.permission_backend = kinto.core.permission.memory
13
14 :noindex:
15 """
16
17 def __init__(self, *args, **kwargs):
18 super().__init__(*args, **kwargs)
19 self.flush()
20
21 def initialize_schema(self, dry_run=False):
22 # Nothing to do.
23 pass
24
25 def flush(self):
26 self._store = {}
27
28 @synchronized
29 def add_user_principal(self, user_id, principal):
30 user_key = 'user:{}'.format(user_id)
31 user_principals = self._store.get(user_key, set())
32 user_principals.add(principal)
33 self._store[user_key] = user_principals
34
35 @synchronized
36 def remove_user_principal(self, user_id, principal):
37 user_key = 'user:{}'.format(user_id)
38 user_principals = self._store.get(user_key, set())
39 try:
40 user_principals.remove(principal)
41 except KeyError:
42 pass
43 if len(user_principals) == 0:
44 if user_key in self._store:
45 del self._store[user_key]
46 else:
47 self._store[user_key] = user_principals
48
49 @synchronized
50 def remove_principal(self, principal):
51 for user_principals in self._store.values():
52 try:
53 user_principals.remove(principal)
54 except KeyError:
55 pass
56
57 @synchronized
58 def get_user_principals(self, user_id):
59 # Fetch the groups the user is in.
60 user_key = 'user:{}'.format(user_id)
61 members = self._store.get(user_key, set())
62 # Fetch the groups system.Authenticated is in.
63 group_authenticated = self._store.get('user:system.Authenticated', set())
64 return members | group_authenticated
65
66 @synchronized
67 def add_principal_to_ace(self, object_id, permission, principal):
68 permission_key = 'permission:{}:{}'.format(object_id, permission)
69 object_permission_principals = self._store.get(permission_key, set())
70 object_permission_principals.add(principal)
71 self._store[permission_key] = object_permission_principals
72
73 @synchronized
74 def remove_principal_from_ace(self, object_id, permission, principal):
75 permission_key = 'permission:{}:{}'.format(object_id, permission)
76 object_permission_principals = self._store.get(permission_key, set())
77 try:
78 object_permission_principals.remove(principal)
79 except KeyError:
80 pass
81 if len(object_permission_principals) == 0:
82 if permission_key in self._store:
83 del self._store[permission_key]
84 else:
85 self._store[permission_key] = object_permission_principals
86
87 @synchronized
88 def get_object_permission_principals(self, object_id, permission):
89 permission_key = 'permission:{}:{}'.format(object_id, permission)
90 members = self._store.get(permission_key, set())
91 return members
92
93 @synchronized
94 def get_accessible_objects(self, principals, bound_permissions=None, with_children=True):
95 principals = set(principals)
96 candidates = []
97 if bound_permissions is None:
98 for key, value in self._store.items():
99 _, object_id, permission = key.split(':', 2)
100 candidates.append((object_id, permission, value))
101 else:
102 for pattern, perm in bound_permissions:
103 id_match = '.*' if with_children else '[^/]+'
104 regexp = re.compile('^{}$'.format(pattern.replace('*', id_match)))
105 for key, value in self._store.items():
106 if key.endswith(perm):
107 object_id = key.split(':')[1]
108 if regexp.match(object_id):
109 candidates.append((object_id, perm, value))
110
111 perms_by_object_id = {}
112 for (object_id, perm, value) in candidates:
113 if len(principals & value) > 0:
114 perms_by_object_id.setdefault(object_id, set()).add(perm)
115 return perms_by_object_id
116
117 @synchronized
118 def get_authorized_principals(self, bound_permissions):
119 principals = set()
120 for obj_id, perm in bound_permissions:
121 principals |= self.get_object_permission_principals(obj_id, perm)
122 return principals
123
124 @synchronized
125 def get_objects_permissions(self, objects_ids, permissions=None):
126 result = []
127 for object_id in objects_ids:
128 if permissions is None:
129 aces = [k for k in self._store.keys()
130 if k.startswith('permission:{}:'.format(object_id))]
131 else:
132 aces = ['permission:{}:{}'.format(object_id, permission)
133 for permission in permissions]
134 perms = {}
135 for ace in aces:
136 # Should work with 'permission:/url/id:record:create'.
137 permission = ace.split(':', 2)[2]
138 perms[permission] = set(self._store[ace])
139 result.append(perms)
140 return result
141
142 @synchronized
143 def replace_object_permissions(self, object_id, permissions):
144 for permission, principals in permissions.items():
145 permission_key = 'permission:{}:{}'.format(object_id, permission)
146 if permission_key in self._store and len(principals) == 0:
147 del self._store[permission_key]
148 else:
149 self._store[permission_key] = set(principals)
150 return permissions
151
152 @synchronized
153 def delete_object_permissions(self, *object_id_list):
154 to_delete = []
155 for key in self._store.keys():
156 object_id = key.split(':')[1]
157 for pattern in object_id_list:
158 regexp = re.compile('^{}$'.format(pattern.replace('*', '.*')))
159 if regexp.match(object_id):
160 to_delete.append(key)
161 for k in to_delete:
162 del self._store[k]
163
164
165 def load_from_config(config):
166 return Permission()
167
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kinto/core/permission/memory.py b/kinto/core/permission/memory.py
--- a/kinto/core/permission/memory.py
+++ b/kinto/core/permission/memory.py
@@ -145,7 +145,7 @@
permission_key = 'permission:{}:{}'.format(object_id, permission)
if permission_key in self._store and len(principals) == 0:
del self._store[permission_key]
- else:
+ elif principals:
self._store[permission_key] = set(principals)
return permissions
|
{"golden_diff": "diff --git a/kinto/core/permission/memory.py b/kinto/core/permission/memory.py\n--- a/kinto/core/permission/memory.py\n+++ b/kinto/core/permission/memory.py\n@@ -145,7 +145,7 @@\n permission_key = 'permission:{}:{}'.format(object_id, permission)\n if permission_key in self._store and len(principals) == 0:\n del self._store[permission_key]\n- else:\n+ elif principals:\n self._store[permission_key] = set(principals)\n return permissions\n", "issue": "Memory backend sometimes show empty permissions\nIf you do multiple requests on memory backend, the empty permissions cycle between showing and not showing. The same does not happen with postgres.\r\n\r\n```json\r\ngsurita-30820:kinto gsurita$ echo '{\"permissions\": {\"read\": []}}' | http put localhost:8888/v1/buckets/b1 -a a:a\r\n\r\n{\r\n \"data\": {\r\n \"id\": \"b1\",\r\n \"last_modified\": 1485553456205\r\n },\r\n \"permissions\": {\r\n \"write\": [\r\n \"basicauth:80866b4d0726f35eda20b90bc479a38727c99c68d7c88a87f3b860726a79daeb\"\r\n ]\r\n }\r\n}\r\n\r\ngsurita-30820:kinto gsurita$ echo '{\"permissions\": {\"read\": []}}' | http put localhost:8888/v1/buckets/b1 -a a:a\r\n\r\n{\r\n \"data\": {\r\n \"id\": \"b1\",\r\n \"last_modified\": 1485553470501\r\n },\r\n \"permissions\": {\r\n \"collection:create\": [],\r\n \"group:create\": [],\r\n \"read\": [],\r\n \"write\": [\r\n \"basicauth:80866b4d0726f35eda20b90bc479a38727c99c68d7c88a87f3b860726a79daeb\"\r\n ]\r\n }\r\n}\r\n\r\ngsurita-30820:kinto gsurita$ echo '{\"permissions\": {\"read\": []}}' | http put localhost:8888/v1/buckets/b1 -a a:a\r\n\r\n{\r\n \"data\": {\r\n \"id\": \"b1\",\r\n \"last_modified\": 1485553471419\r\n },\r\n \"permissions\": {\r\n \"write\": [\r\n \"basicauth:80866b4d0726f35eda20b90bc479a38727c99c68d7c88a87f3b860726a79daeb\"\r\n ]\r\n }\r\n}\r\n\r\ngsurita-30820:kinto gsurita$ echo '{\"permissions\": {\"read\": []}}' | http put localhost:8888/v1/buckets/b1 -a a:a\r\n\r\n{\r\n \"data\": {\r\n \"id\": \"b1\",\r\n \"last_modified\": 1485553472203\r\n },\r\n \"permissions\": {\r\n \"collection:create\": [],\r\n \"group:create\": [],\r\n \"read\": [],\r\n \"write\": [\r\n \"basicauth:80866b4d0726f35eda20b90bc479a38727c99c68d7c88a87f3b860726a79daeb\"\r\n ]\r\n }\r\n}\r\n\n", "before_files": [{"content": "import re\n\nfrom kinto.core.decorators import synchronized\nfrom kinto.core.permission import PermissionBase\n\n\nclass Permission(PermissionBase):\n \"\"\"Permission backend implementation in local process memory.\n\n Enable in configuration::\n\n kinto.permission_backend = kinto.core.permission.memory\n\n :noindex:\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.flush()\n\n def initialize_schema(self, dry_run=False):\n # Nothing to do.\n pass\n\n def flush(self):\n self._store = {}\n\n @synchronized\n def add_user_principal(self, user_id, principal):\n user_key = 'user:{}'.format(user_id)\n user_principals = self._store.get(user_key, set())\n user_principals.add(principal)\n self._store[user_key] = user_principals\n\n @synchronized\n def remove_user_principal(self, user_id, principal):\n user_key = 'user:{}'.format(user_id)\n user_principals = self._store.get(user_key, set())\n try:\n user_principals.remove(principal)\n except KeyError:\n pass\n if len(user_principals) == 0:\n if user_key in self._store:\n del self._store[user_key]\n else:\n self._store[user_key] = user_principals\n\n @synchronized\n def remove_principal(self, principal):\n for user_principals in self._store.values():\n try:\n 
user_principals.remove(principal)\n except KeyError:\n pass\n\n @synchronized\n def get_user_principals(self, user_id):\n # Fetch the groups the user is in.\n user_key = 'user:{}'.format(user_id)\n members = self._store.get(user_key, set())\n # Fetch the groups system.Authenticated is in.\n group_authenticated = self._store.get('user:system.Authenticated', set())\n return members | group_authenticated\n\n @synchronized\n def add_principal_to_ace(self, object_id, permission, principal):\n permission_key = 'permission:{}:{}'.format(object_id, permission)\n object_permission_principals = self._store.get(permission_key, set())\n object_permission_principals.add(principal)\n self._store[permission_key] = object_permission_principals\n\n @synchronized\n def remove_principal_from_ace(self, object_id, permission, principal):\n permission_key = 'permission:{}:{}'.format(object_id, permission)\n object_permission_principals = self._store.get(permission_key, set())\n try:\n object_permission_principals.remove(principal)\n except KeyError:\n pass\n if len(object_permission_principals) == 0:\n if permission_key in self._store:\n del self._store[permission_key]\n else:\n self._store[permission_key] = object_permission_principals\n\n @synchronized\n def get_object_permission_principals(self, object_id, permission):\n permission_key = 'permission:{}:{}'.format(object_id, permission)\n members = self._store.get(permission_key, set())\n return members\n\n @synchronized\n def get_accessible_objects(self, principals, bound_permissions=None, with_children=True):\n principals = set(principals)\n candidates = []\n if bound_permissions is None:\n for key, value in self._store.items():\n _, object_id, permission = key.split(':', 2)\n candidates.append((object_id, permission, value))\n else:\n for pattern, perm in bound_permissions:\n id_match = '.*' if with_children else '[^/]+'\n regexp = re.compile('^{}$'.format(pattern.replace('*', id_match)))\n for key, value in self._store.items():\n if key.endswith(perm):\n object_id = key.split(':')[1]\n if regexp.match(object_id):\n candidates.append((object_id, perm, value))\n\n perms_by_object_id = {}\n for (object_id, perm, value) in candidates:\n if len(principals & value) > 0:\n perms_by_object_id.setdefault(object_id, set()).add(perm)\n return perms_by_object_id\n\n @synchronized\n def get_authorized_principals(self, bound_permissions):\n principals = set()\n for obj_id, perm in bound_permissions:\n principals |= self.get_object_permission_principals(obj_id, perm)\n return principals\n\n @synchronized\n def get_objects_permissions(self, objects_ids, permissions=None):\n result = []\n for object_id in objects_ids:\n if permissions is None:\n aces = [k for k in self._store.keys()\n if k.startswith('permission:{}:'.format(object_id))]\n else:\n aces = ['permission:{}:{}'.format(object_id, permission)\n for permission in permissions]\n perms = {}\n for ace in aces:\n # Should work with 'permission:/url/id:record:create'.\n permission = ace.split(':', 2)[2]\n perms[permission] = set(self._store[ace])\n result.append(perms)\n return result\n\n @synchronized\n def replace_object_permissions(self, object_id, permissions):\n for permission, principals in permissions.items():\n permission_key = 'permission:{}:{}'.format(object_id, permission)\n if permission_key in self._store and len(principals) == 0:\n del self._store[permission_key]\n else:\n self._store[permission_key] = set(principals)\n return permissions\n\n @synchronized\n def delete_object_permissions(self, 
*object_id_list):\n to_delete = []\n for key in self._store.keys():\n object_id = key.split(':')[1]\n for pattern in object_id_list:\n regexp = re.compile('^{}$'.format(pattern.replace('*', '.*')))\n if regexp.match(object_id):\n to_delete.append(key)\n for k in to_delete:\n del self._store[k]\n\n\ndef load_from_config(config):\n return Permission()\n", "path": "kinto/core/permission/memory.py"}], "after_files": [{"content": "import re\n\nfrom kinto.core.decorators import synchronized\nfrom kinto.core.permission import PermissionBase\n\n\nclass Permission(PermissionBase):\n \"\"\"Permission backend implementation in local process memory.\n\n Enable in configuration::\n\n kinto.permission_backend = kinto.core.permission.memory\n\n :noindex:\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.flush()\n\n def initialize_schema(self, dry_run=False):\n # Nothing to do.\n pass\n\n def flush(self):\n self._store = {}\n\n @synchronized\n def add_user_principal(self, user_id, principal):\n user_key = 'user:{}'.format(user_id)\n user_principals = self._store.get(user_key, set())\n user_principals.add(principal)\n self._store[user_key] = user_principals\n\n @synchronized\n def remove_user_principal(self, user_id, principal):\n user_key = 'user:{}'.format(user_id)\n user_principals = self._store.get(user_key, set())\n try:\n user_principals.remove(principal)\n except KeyError:\n pass\n if len(user_principals) == 0:\n if user_key in self._store:\n del self._store[user_key]\n else:\n self._store[user_key] = user_principals\n\n @synchronized\n def remove_principal(self, principal):\n for user_principals in self._store.values():\n try:\n user_principals.remove(principal)\n except KeyError:\n pass\n\n @synchronized\n def get_user_principals(self, user_id):\n # Fetch the groups the user is in.\n user_key = 'user:{}'.format(user_id)\n members = self._store.get(user_key, set())\n # Fetch the groups system.Authenticated is in.\n group_authenticated = self._store.get('user:system.Authenticated', set())\n return members | group_authenticated\n\n @synchronized\n def add_principal_to_ace(self, object_id, permission, principal):\n permission_key = 'permission:{}:{}'.format(object_id, permission)\n object_permission_principals = self._store.get(permission_key, set())\n object_permission_principals.add(principal)\n self._store[permission_key] = object_permission_principals\n\n @synchronized\n def remove_principal_from_ace(self, object_id, permission, principal):\n permission_key = 'permission:{}:{}'.format(object_id, permission)\n object_permission_principals = self._store.get(permission_key, set())\n try:\n object_permission_principals.remove(principal)\n except KeyError:\n pass\n if len(object_permission_principals) == 0:\n if permission_key in self._store:\n del self._store[permission_key]\n else:\n self._store[permission_key] = object_permission_principals\n\n @synchronized\n def get_object_permission_principals(self, object_id, permission):\n permission_key = 'permission:{}:{}'.format(object_id, permission)\n members = self._store.get(permission_key, set())\n return members\n\n @synchronized\n def get_accessible_objects(self, principals, bound_permissions=None, with_children=True):\n principals = set(principals)\n candidates = []\n if bound_permissions is None:\n for key, value in self._store.items():\n _, object_id, permission = key.split(':', 2)\n candidates.append((object_id, permission, value))\n else:\n for pattern, perm in bound_permissions:\n id_match = '.*' if 
with_children else '[^/]+'\n regexp = re.compile('^{}$'.format(pattern.replace('*', id_match)))\n for key, value in self._store.items():\n if key.endswith(perm):\n object_id = key.split(':')[1]\n if regexp.match(object_id):\n candidates.append((object_id, perm, value))\n\n perms_by_object_id = {}\n for (object_id, perm, value) in candidates:\n if len(principals & value) > 0:\n perms_by_object_id.setdefault(object_id, set()).add(perm)\n return perms_by_object_id\n\n @synchronized\n def get_authorized_principals(self, bound_permissions):\n principals = set()\n for obj_id, perm in bound_permissions:\n principals |= self.get_object_permission_principals(obj_id, perm)\n return principals\n\n @synchronized\n def get_objects_permissions(self, objects_ids, permissions=None):\n result = []\n for object_id in objects_ids:\n if permissions is None:\n aces = [k for k in self._store.keys()\n if k.startswith('permission:{}:'.format(object_id))]\n else:\n aces = ['permission:{}:{}'.format(object_id, permission)\n for permission in permissions]\n perms = {}\n for ace in aces:\n # Should work with 'permission:/url/id:record:create'.\n permission = ace.split(':', 2)[2]\n perms[permission] = set(self._store[ace])\n result.append(perms)\n return result\n\n @synchronized\n def replace_object_permissions(self, object_id, permissions):\n for permission, principals in permissions.items():\n permission_key = 'permission:{}:{}'.format(object_id, permission)\n if permission_key in self._store and len(principals) == 0:\n del self._store[permission_key]\n elif principals:\n self._store[permission_key] = set(principals)\n return permissions\n\n @synchronized\n def delete_object_permissions(self, *object_id_list):\n to_delete = []\n for key in self._store.keys():\n object_id = key.split(':')[1]\n for pattern in object_id_list:\n regexp = re.compile('^{}$'.format(pattern.replace('*', '.*')))\n if regexp.match(object_id):\n to_delete.append(key)\n for k in to_delete:\n del self._store[k]\n\n\ndef load_from_config(config):\n return Permission()\n", "path": "kinto/core/permission/memory.py"}]}
| 2,677 | 123 |
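A short, self-contained illustration of the fix in the Kinto record above: `replace_object_permissions` must not write an empty principal set into the in-memory store, otherwise later reads report empty `read`/`collection:create` entries that appear and disappear between requests. The dict-backed stub below only models the memory backend's behaviour and is not Kinto code; `guarded=False` stands in for the pre-patch branch.

```python
# Sketch of why the `elif principals:` guard matters for the in-memory store.
# `store` stands in for Permission._store; object ids and principals are made up.
def replace_object_permissions(store, object_id, permissions, guarded=True):
    for permission, principals in permissions.items():
        key = "permission:{}:{}".format(object_id, permission)
        if key in store and len(principals) == 0:
            del store[key]
        elif not guarded or principals:   # guarded=False reproduces the old behaviour
            store[key] = set(principals)
    return permissions


buggy, fixed = {}, {}
payload = {"read": [], "write": ["account:alice"]}
replace_object_permissions(buggy, "/buckets/b1", payload, guarded=False)
replace_object_permissions(fixed, "/buckets/b1", payload, guarded=True)
print(sorted(buggy))  # ['permission:/buckets/b1:read', 'permission:/buckets/b1:write']
print(sorted(fixed))  # ['permission:/buckets/b1:write']  -> no stray empty 'read' entry
```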
gh_patches_debug_23153
|
rasdani/github-patches
|
git_diff
|
jazzband__pip-tools-1172
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Feature: pip-sync --dry-run should return a non-zero exit code if changes were to occur
#### What's the problem this feature will solve?
I'm looking to add a pre-commit hook to check the environment is up to date
```
- repo: local
hooks:
- id: pip-sync
name: pip-sync check
entry: pip-sync --dry-run
language: system
always_run: true
pass_filenames: false
```
#### Describe the solution you'd like
```
$ pip-sync --dry-run
Would install:
numpy==1.18.5
$ $?
2
```
#### Alternative Solutions
various | awk stuff
#### Additional context
NA
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `piptools/sync.py`
Content:
```
1 import collections
2 import os
3 import sys
4 import tempfile
5 from subprocess import check_call # nosec
6
7 from pip._internal.commands.freeze import DEV_PKGS
8 from pip._internal.utils.compat import stdlib_pkgs
9
10 from . import click
11 from .exceptions import IncompatibleRequirements
12 from .utils import (
13 flat_map,
14 format_requirement,
15 get_hashes_from_ireq,
16 is_url_requirement,
17 key_from_ireq,
18 key_from_req,
19 )
20
21 PACKAGES_TO_IGNORE = (
22 ["-markerlib", "pip", "pip-tools", "pip-review", "pkg-resources"]
23 + list(stdlib_pkgs)
24 + list(DEV_PKGS)
25 )
26
27
28 def dependency_tree(installed_keys, root_key):
29 """
30 Calculate the dependency tree for the package `root_key` and return
31 a collection of all its dependencies. Uses a DFS traversal algorithm.
32
33 `installed_keys` should be a {key: requirement} mapping, e.g.
34 {'django': from_line('django==1.8')}
35 `root_key` should be the key to return the dependency tree for.
36 """
37 dependencies = set()
38 queue = collections.deque()
39
40 if root_key in installed_keys:
41 dep = installed_keys[root_key]
42 queue.append(dep)
43
44 while queue:
45 v = queue.popleft()
46 key = key_from_req(v)
47 if key in dependencies:
48 continue
49
50 dependencies.add(key)
51
52 for dep_specifier in v.requires():
53 dep_name = key_from_req(dep_specifier)
54 if dep_name in installed_keys:
55 dep = installed_keys[dep_name]
56
57 if dep_specifier.specifier.contains(dep.version):
58 queue.append(dep)
59
60 return dependencies
61
62
63 def get_dists_to_ignore(installed):
64 """
65 Returns a collection of package names to ignore when performing pip-sync,
66 based on the currently installed environment. For example, when pip-tools
67 is installed in the local environment, it should be ignored, including all
68 of its dependencies (e.g. click). When pip-tools is not installed
69 locally, click should also be installed/uninstalled depending on the given
70 requirements.
71 """
72 installed_keys = {key_from_req(r): r for r in installed}
73 return list(
74 flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE)
75 )
76
77
78 def merge(requirements, ignore_conflicts):
79 by_key = {}
80
81 for ireq in requirements:
82 # Limitation: URL requirements are merged by precise string match, so
83 # "file:///example.zip#egg=example", "file:///example.zip", and
84 # "example==1.0" will not merge with each other
85 if ireq.match_markers():
86 key = key_from_ireq(ireq)
87
88 if not ignore_conflicts:
89 existing_ireq = by_key.get(key)
90 if existing_ireq:
91 # NOTE: We check equality here since we can assume that the
92 # requirements are all pinned
93 if ireq.specifier != existing_ireq.specifier:
94 raise IncompatibleRequirements(ireq, existing_ireq)
95
96 # TODO: Always pick the largest specifier in case of a conflict
97 by_key[key] = ireq
98 return by_key.values()
99
100
101 def diff_key_from_ireq(ireq):
102 """
103 Calculate a key for comparing a compiled requirement with installed modules.
104 For URL requirements, only provide a useful key if the url includes
105 #egg=name==version, which will set ireq.req.name and ireq.specifier.
106 Otherwise return ireq.link so the key will not match and the package will
107 reinstall. Reinstall is necessary to ensure that packages will reinstall
108 if the URL is changed but the version is not.
109 """
110 if is_url_requirement(ireq):
111 if (
112 ireq.req
113 and (getattr(ireq.req, "key", None) or getattr(ireq.req, "name", None))
114 and ireq.specifier
115 ):
116 return key_from_ireq(ireq)
117 return str(ireq.link)
118 return key_from_ireq(ireq)
119
120
121 def diff(compiled_requirements, installed_dists):
122 """
123 Calculate which packages should be installed or uninstalled, given a set
124 of compiled requirements and a list of currently installed modules.
125 """
126 requirements_lut = {diff_key_from_ireq(r): r for r in compiled_requirements}
127
128 satisfied = set() # holds keys
129 to_install = set() # holds InstallRequirement objects
130 to_uninstall = set() # holds keys
131
132 pkgs_to_ignore = get_dists_to_ignore(installed_dists)
133 for dist in installed_dists:
134 key = key_from_req(dist)
135 if key not in requirements_lut or not requirements_lut[key].match_markers():
136 to_uninstall.add(key)
137 elif requirements_lut[key].specifier.contains(dist.version):
138 satisfied.add(key)
139
140 for key, requirement in requirements_lut.items():
141 if key not in satisfied and requirement.match_markers():
142 to_install.add(requirement)
143
144 # Make sure to not uninstall any packages that should be ignored
145 to_uninstall -= set(pkgs_to_ignore)
146
147 return (to_install, to_uninstall)
148
149
150 def sync(
151 to_install,
152 to_uninstall,
153 verbose=False,
154 dry_run=False,
155 install_flags=None,
156 ask=False,
157 ):
158 """
159 Install and uninstalls the given sets of modules.
160 """
161 if not to_uninstall and not to_install:
162 if verbose:
163 click.echo("Everything up-to-date")
164 return 0
165
166 pip_flags = []
167 if not verbose:
168 pip_flags += ["-q"]
169
170 if ask:
171 dry_run = True
172
173 if dry_run:
174 if to_uninstall:
175 click.echo("Would uninstall:")
176 for pkg in sorted(to_uninstall):
177 click.echo(" {}".format(pkg))
178
179 if to_install:
180 click.echo("Would install:")
181 for ireq in sorted(to_install, key=key_from_ireq):
182 click.echo(" {}".format(format_requirement(ireq)))
183
184 if ask and click.confirm("Would you like to proceed with these changes?"):
185 dry_run = False
186
187 if not dry_run:
188 if to_uninstall:
189 check_call( # nosec
190 [sys.executable, "-m", "pip", "uninstall", "-y"]
191 + pip_flags
192 + sorted(to_uninstall)
193 )
194
195 if to_install:
196 if install_flags is None:
197 install_flags = []
198 # prepare requirement lines
199 req_lines = []
200 for ireq in sorted(to_install, key=key_from_ireq):
201 ireq_hashes = get_hashes_from_ireq(ireq)
202 req_lines.append(format_requirement(ireq, hashes=ireq_hashes))
203
204 # save requirement lines to a temporary file
205 tmp_req_file = tempfile.NamedTemporaryFile(mode="wt", delete=False)
206 tmp_req_file.write("\n".join(req_lines))
207 tmp_req_file.close()
208
209 try:
210 check_call( # nosec
211 [sys.executable, "-m", "pip", "install", "-r", tmp_req_file.name]
212 + pip_flags
213 + install_flags
214 )
215 finally:
216 os.unlink(tmp_req_file.name)
217
218 return 0
219
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/piptools/sync.py b/piptools/sync.py
--- a/piptools/sync.py
+++ b/piptools/sync.py
@@ -158,10 +158,12 @@
"""
Install and uninstalls the given sets of modules.
"""
+ exit_code = 0
+
if not to_uninstall and not to_install:
if verbose:
click.echo("Everything up-to-date")
- return 0
+ return exit_code
pip_flags = []
if not verbose:
@@ -181,8 +183,11 @@
for ireq in sorted(to_install, key=key_from_ireq):
click.echo(" {}".format(format_requirement(ireq)))
+ exit_code = 1
+
if ask and click.confirm("Would you like to proceed with these changes?"):
dry_run = False
+ exit_code = 0
if not dry_run:
if to_uninstall:
@@ -215,4 +220,4 @@
finally:
os.unlink(tmp_req_file.name)
- return 0
+ return exit_code
|
{"golden_diff": "diff --git a/piptools/sync.py b/piptools/sync.py\n--- a/piptools/sync.py\n+++ b/piptools/sync.py\n@@ -158,10 +158,12 @@\n \"\"\"\n Install and uninstalls the given sets of modules.\n \"\"\"\n+ exit_code = 0\n+\n if not to_uninstall and not to_install:\n if verbose:\n click.echo(\"Everything up-to-date\")\n- return 0\n+ return exit_code\n \n pip_flags = []\n if not verbose:\n@@ -181,8 +183,11 @@\n for ireq in sorted(to_install, key=key_from_ireq):\n click.echo(\" {}\".format(format_requirement(ireq)))\n \n+ exit_code = 1\n+\n if ask and click.confirm(\"Would you like to proceed with these changes?\"):\n dry_run = False\n+ exit_code = 0\n \n if not dry_run:\n if to_uninstall:\n@@ -215,4 +220,4 @@\n finally:\n os.unlink(tmp_req_file.name)\n \n- return 0\n+ return exit_code\n", "issue": "Feature: pip-sync --dry-run should return a non-zero exit code if changes were to occur\n#### What's the problem this feature will solve?\r\nI'm looking to add a pre-commit hook to check the environment is up to date\r\n\r\n```\r\n- repo: local\r\n hooks:\r\n - id: pip-sync\r\n name: pip-sync check\r\n entry: pip-sync --dry-run\r\n language: system\r\n always_run: true\r\n pass_filenames: false\r\n```\r\n\r\n#### Describe the solution you'd like\r\n```\r\n$ pip-sync --dry-run\r\nWould install:\r\n numpy==1.18.5\r\n$ $?\r\n2\r\n```\r\n\r\n\r\n#### Alternative Solutions\r\nvarious | awk stuff\r\n\r\n#### Additional context\r\nNA\r\n\n", "before_files": [{"content": "import collections\nimport os\nimport sys\nimport tempfile\nfrom subprocess import check_call # nosec\n\nfrom pip._internal.commands.freeze import DEV_PKGS\nfrom pip._internal.utils.compat import stdlib_pkgs\n\nfrom . import click\nfrom .exceptions import IncompatibleRequirements\nfrom .utils import (\n flat_map,\n format_requirement,\n get_hashes_from_ireq,\n is_url_requirement,\n key_from_ireq,\n key_from_req,\n)\n\nPACKAGES_TO_IGNORE = (\n [\"-markerlib\", \"pip\", \"pip-tools\", \"pip-review\", \"pkg-resources\"]\n + list(stdlib_pkgs)\n + list(DEV_PKGS)\n)\n\n\ndef dependency_tree(installed_keys, root_key):\n \"\"\"\n Calculate the dependency tree for the package `root_key` and return\n a collection of all its dependencies. Uses a DFS traversal algorithm.\n\n `installed_keys` should be a {key: requirement} mapping, e.g.\n {'django': from_line('django==1.8')}\n `root_key` should be the key to return the dependency tree for.\n \"\"\"\n dependencies = set()\n queue = collections.deque()\n\n if root_key in installed_keys:\n dep = installed_keys[root_key]\n queue.append(dep)\n\n while queue:\n v = queue.popleft()\n key = key_from_req(v)\n if key in dependencies:\n continue\n\n dependencies.add(key)\n\n for dep_specifier in v.requires():\n dep_name = key_from_req(dep_specifier)\n if dep_name in installed_keys:\n dep = installed_keys[dep_name]\n\n if dep_specifier.specifier.contains(dep.version):\n queue.append(dep)\n\n return dependencies\n\n\ndef get_dists_to_ignore(installed):\n \"\"\"\n Returns a collection of package names to ignore when performing pip-sync,\n based on the currently installed environment. For example, when pip-tools\n is installed in the local environment, it should be ignored, including all\n of its dependencies (e.g. click). 
When pip-tools is not installed\n locally, click should also be installed/uninstalled depending on the given\n requirements.\n \"\"\"\n installed_keys = {key_from_req(r): r for r in installed}\n return list(\n flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE)\n )\n\n\ndef merge(requirements, ignore_conflicts):\n by_key = {}\n\n for ireq in requirements:\n # Limitation: URL requirements are merged by precise string match, so\n # \"file:///example.zip#egg=example\", \"file:///example.zip\", and\n # \"example==1.0\" will not merge with each other\n if ireq.match_markers():\n key = key_from_ireq(ireq)\n\n if not ignore_conflicts:\n existing_ireq = by_key.get(key)\n if existing_ireq:\n # NOTE: We check equality here since we can assume that the\n # requirements are all pinned\n if ireq.specifier != existing_ireq.specifier:\n raise IncompatibleRequirements(ireq, existing_ireq)\n\n # TODO: Always pick the largest specifier in case of a conflict\n by_key[key] = ireq\n return by_key.values()\n\n\ndef diff_key_from_ireq(ireq):\n \"\"\"\n Calculate a key for comparing a compiled requirement with installed modules.\n For URL requirements, only provide a useful key if the url includes\n #egg=name==version, which will set ireq.req.name and ireq.specifier.\n Otherwise return ireq.link so the key will not match and the package will\n reinstall. Reinstall is necessary to ensure that packages will reinstall\n if the URL is changed but the version is not.\n \"\"\"\n if is_url_requirement(ireq):\n if (\n ireq.req\n and (getattr(ireq.req, \"key\", None) or getattr(ireq.req, \"name\", None))\n and ireq.specifier\n ):\n return key_from_ireq(ireq)\n return str(ireq.link)\n return key_from_ireq(ireq)\n\n\ndef diff(compiled_requirements, installed_dists):\n \"\"\"\n Calculate which packages should be installed or uninstalled, given a set\n of compiled requirements and a list of currently installed modules.\n \"\"\"\n requirements_lut = {diff_key_from_ireq(r): r for r in compiled_requirements}\n\n satisfied = set() # holds keys\n to_install = set() # holds InstallRequirement objects\n to_uninstall = set() # holds keys\n\n pkgs_to_ignore = get_dists_to_ignore(installed_dists)\n for dist in installed_dists:\n key = key_from_req(dist)\n if key not in requirements_lut or not requirements_lut[key].match_markers():\n to_uninstall.add(key)\n elif requirements_lut[key].specifier.contains(dist.version):\n satisfied.add(key)\n\n for key, requirement in requirements_lut.items():\n if key not in satisfied and requirement.match_markers():\n to_install.add(requirement)\n\n # Make sure to not uninstall any packages that should be ignored\n to_uninstall -= set(pkgs_to_ignore)\n\n return (to_install, to_uninstall)\n\n\ndef sync(\n to_install,\n to_uninstall,\n verbose=False,\n dry_run=False,\n install_flags=None,\n ask=False,\n):\n \"\"\"\n Install and uninstalls the given sets of modules.\n \"\"\"\n if not to_uninstall and not to_install:\n if verbose:\n click.echo(\"Everything up-to-date\")\n return 0\n\n pip_flags = []\n if not verbose:\n pip_flags += [\"-q\"]\n\n if ask:\n dry_run = True\n\n if dry_run:\n if to_uninstall:\n click.echo(\"Would uninstall:\")\n for pkg in sorted(to_uninstall):\n click.echo(\" {}\".format(pkg))\n\n if to_install:\n click.echo(\"Would install:\")\n for ireq in sorted(to_install, key=key_from_ireq):\n click.echo(\" {}\".format(format_requirement(ireq)))\n\n if ask and click.confirm(\"Would you like to proceed with these changes?\"):\n dry_run = False\n\n if not dry_run:\n 
if to_uninstall:\n check_call( # nosec\n [sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\"]\n + pip_flags\n + sorted(to_uninstall)\n )\n\n if to_install:\n if install_flags is None:\n install_flags = []\n # prepare requirement lines\n req_lines = []\n for ireq in sorted(to_install, key=key_from_ireq):\n ireq_hashes = get_hashes_from_ireq(ireq)\n req_lines.append(format_requirement(ireq, hashes=ireq_hashes))\n\n # save requirement lines to a temporary file\n tmp_req_file = tempfile.NamedTemporaryFile(mode=\"wt\", delete=False)\n tmp_req_file.write(\"\\n\".join(req_lines))\n tmp_req_file.close()\n\n try:\n check_call( # nosec\n [sys.executable, \"-m\", \"pip\", \"install\", \"-r\", tmp_req_file.name]\n + pip_flags\n + install_flags\n )\n finally:\n os.unlink(tmp_req_file.name)\n\n return 0\n", "path": "piptools/sync.py"}], "after_files": [{"content": "import collections\nimport os\nimport sys\nimport tempfile\nfrom subprocess import check_call # nosec\n\nfrom pip._internal.commands.freeze import DEV_PKGS\nfrom pip._internal.utils.compat import stdlib_pkgs\n\nfrom . import click\nfrom .exceptions import IncompatibleRequirements\nfrom .utils import (\n flat_map,\n format_requirement,\n get_hashes_from_ireq,\n is_url_requirement,\n key_from_ireq,\n key_from_req,\n)\n\nPACKAGES_TO_IGNORE = (\n [\"-markerlib\", \"pip\", \"pip-tools\", \"pip-review\", \"pkg-resources\"]\n + list(stdlib_pkgs)\n + list(DEV_PKGS)\n)\n\n\ndef dependency_tree(installed_keys, root_key):\n \"\"\"\n Calculate the dependency tree for the package `root_key` and return\n a collection of all its dependencies. Uses a DFS traversal algorithm.\n\n `installed_keys` should be a {key: requirement} mapping, e.g.\n {'django': from_line('django==1.8')}\n `root_key` should be the key to return the dependency tree for.\n \"\"\"\n dependencies = set()\n queue = collections.deque()\n\n if root_key in installed_keys:\n dep = installed_keys[root_key]\n queue.append(dep)\n\n while queue:\n v = queue.popleft()\n key = key_from_req(v)\n if key in dependencies:\n continue\n\n dependencies.add(key)\n\n for dep_specifier in v.requires():\n dep_name = key_from_req(dep_specifier)\n if dep_name in installed_keys:\n dep = installed_keys[dep_name]\n\n if dep_specifier.specifier.contains(dep.version):\n queue.append(dep)\n\n return dependencies\n\n\ndef get_dists_to_ignore(installed):\n \"\"\"\n Returns a collection of package names to ignore when performing pip-sync,\n based on the currently installed environment. For example, when pip-tools\n is installed in the local environment, it should be ignored, including all\n of its dependencies (e.g. click). 
When pip-tools is not installed\n locally, click should also be installed/uninstalled depending on the given\n requirements.\n \"\"\"\n installed_keys = {key_from_req(r): r for r in installed}\n return list(\n flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE)\n )\n\n\ndef merge(requirements, ignore_conflicts):\n by_key = {}\n\n for ireq in requirements:\n # Limitation: URL requirements are merged by precise string match, so\n # \"file:///example.zip#egg=example\", \"file:///example.zip\", and\n # \"example==1.0\" will not merge with each other\n if ireq.match_markers():\n key = key_from_ireq(ireq)\n\n if not ignore_conflicts:\n existing_ireq = by_key.get(key)\n if existing_ireq:\n # NOTE: We check equality here since we can assume that the\n # requirements are all pinned\n if ireq.specifier != existing_ireq.specifier:\n raise IncompatibleRequirements(ireq, existing_ireq)\n\n # TODO: Always pick the largest specifier in case of a conflict\n by_key[key] = ireq\n return by_key.values()\n\n\ndef diff_key_from_ireq(ireq):\n \"\"\"\n Calculate a key for comparing a compiled requirement with installed modules.\n For URL requirements, only provide a useful key if the url includes\n #egg=name==version, which will set ireq.req.name and ireq.specifier.\n Otherwise return ireq.link so the key will not match and the package will\n reinstall. Reinstall is necessary to ensure that packages will reinstall\n if the URL is changed but the version is not.\n \"\"\"\n if is_url_requirement(ireq):\n if (\n ireq.req\n and (getattr(ireq.req, \"key\", None) or getattr(ireq.req, \"name\", None))\n and ireq.specifier\n ):\n return key_from_ireq(ireq)\n return str(ireq.link)\n return key_from_ireq(ireq)\n\n\ndef diff(compiled_requirements, installed_dists):\n \"\"\"\n Calculate which packages should be installed or uninstalled, given a set\n of compiled requirements and a list of currently installed modules.\n \"\"\"\n requirements_lut = {diff_key_from_ireq(r): r for r in compiled_requirements}\n\n satisfied = set() # holds keys\n to_install = set() # holds InstallRequirement objects\n to_uninstall = set() # holds keys\n\n pkgs_to_ignore = get_dists_to_ignore(installed_dists)\n for dist in installed_dists:\n key = key_from_req(dist)\n if key not in requirements_lut or not requirements_lut[key].match_markers():\n to_uninstall.add(key)\n elif requirements_lut[key].specifier.contains(dist.version):\n satisfied.add(key)\n\n for key, requirement in requirements_lut.items():\n if key not in satisfied and requirement.match_markers():\n to_install.add(requirement)\n\n # Make sure to not uninstall any packages that should be ignored\n to_uninstall -= set(pkgs_to_ignore)\n\n return (to_install, to_uninstall)\n\n\ndef sync(\n to_install,\n to_uninstall,\n verbose=False,\n dry_run=False,\n install_flags=None,\n ask=False,\n):\n \"\"\"\n Install and uninstalls the given sets of modules.\n \"\"\"\n exit_code = 0\n\n if not to_uninstall and not to_install:\n if verbose:\n click.echo(\"Everything up-to-date\")\n return exit_code\n\n pip_flags = []\n if not verbose:\n pip_flags += [\"-q\"]\n\n if ask:\n dry_run = True\n\n if dry_run:\n if to_uninstall:\n click.echo(\"Would uninstall:\")\n for pkg in sorted(to_uninstall):\n click.echo(\" {}\".format(pkg))\n\n if to_install:\n click.echo(\"Would install:\")\n for ireq in sorted(to_install, key=key_from_ireq):\n click.echo(\" {}\".format(format_requirement(ireq)))\n\n exit_code = 1\n\n if ask and click.confirm(\"Would you like to proceed with these 
changes?\"):\n dry_run = False\n exit_code = 0\n\n if not dry_run:\n if to_uninstall:\n check_call( # nosec\n [sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\"]\n + pip_flags\n + sorted(to_uninstall)\n )\n\n if to_install:\n if install_flags is None:\n install_flags = []\n # prepare requirement lines\n req_lines = []\n for ireq in sorted(to_install, key=key_from_ireq):\n ireq_hashes = get_hashes_from_ireq(ireq)\n req_lines.append(format_requirement(ireq, hashes=ireq_hashes))\n\n # save requirement lines to a temporary file\n tmp_req_file = tempfile.NamedTemporaryFile(mode=\"wt\", delete=False)\n tmp_req_file.write(\"\\n\".join(req_lines))\n tmp_req_file.close()\n\n try:\n check_call( # nosec\n [sys.executable, \"-m\", \"pip\", \"install\", \"-r\", tmp_req_file.name]\n + pip_flags\n + install_flags\n )\n finally:\n os.unlink(tmp_req_file.name)\n\n return exit_code\n", "path": "piptools/sync.py"}]}
| 2,561 | 259 |
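The behavioural contract introduced by the pip-tools record above is easy to state in isolation: a dry run returns exit code 1 when changes would occur, 0 otherwise, and a confirmed `--ask` run resets it to 0 before performing the real sync. The reduced model below mirrors the patched `sync()` exit-code logic without touching pip at all; it is a sketch, not the piptools implementation.

```python
# Reduced model of the exit-code behaviour added to piptools.sync.sync().
# No packages are touched; to_install/to_uninstall are plain sets of names.
def sync_exit_code(to_install, to_uninstall, dry_run=False, ask=False, answer=False):
    exit_code = 0
    if not to_install and not to_uninstall:
        return exit_code            # everything up-to-date
    if ask:
        dry_run = True
    if dry_run:
        exit_code = 1               # changes would occur -> non-zero for CI/pre-commit
        if ask and answer:          # user confirmed, the real sync would proceed
            dry_run = False
            exit_code = 0
    if not dry_run:
        pass                        # real install/uninstall would happen here
    return exit_code


assert sync_exit_code(set(), set(), dry_run=True) == 0
assert sync_exit_code({"numpy==1.18.5"}, set(), dry_run=True) == 1   # pre-commit hook fails
assert sync_exit_code({"numpy==1.18.5"}, set(), ask=True, answer=True) == 0
print("exit-code model matches the patched behaviour")
```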
gh_patches_debug_8007
|
rasdani/github-patches
|
git_diff
|
medtagger__MedTagger-401
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error indicator when logging in or registering went wrong
## Current Behaviour
- currently, only error icon is displayed when something went wrong during logging in or registering new account
## Expected Behaviour
- an error message should be displayed next to the error icon, so that user knows what went wrong
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/medtagger/api/auth/business.py`
Content:
```
1 """Module responsible for business logic in all Auth endpoint."""
2 from medtagger.api import InvalidArgumentsException
3 from medtagger.api.security import hash_password, verify_user_password, generate_auth_token
4 from medtagger.database.models import User
5 from medtagger.repositories import roles as RolesRepository, users as UsersRepository
6
7
8 def create_user(email: str, password: str, first_name: str, last_name: str) -> int:
9 """Create user with the given user information. Password is being hashed.
10
11 :param email: user email in string format
12 :param password: user password in string format
13 :param first_name: user first name in string format
14 :param last_name: user last name in string format
15
16 :return: id of the new user
17 """
18 user = UsersRepository.get_user_by_email(email)
19 if user:
20 raise InvalidArgumentsException('User with this email already exist')
21 password_hash = hash_password(password)
22 new_user = User(email, password_hash, first_name, last_name)
23 role = RolesRepository.get_role_with_name('volunteer')
24 if not role:
25 raise InvalidArgumentsException('Role does not exist.')
26 new_user.roles.append(role)
27 return UsersRepository.add_new_user(new_user)
28
29
30 def sign_in_user(email: str, password: str) -> str:
31 """Sign in user using given username and password.
32
33 :param email: user email in string format
34 :param password: user password in string format
35
36 :return: authentication token
37 """
38 user = UsersRepository.get_user_by_email(email)
39 if not user:
40 raise InvalidArgumentsException('User does not exist.')
41 if not verify_user_password(user, password):
42 raise InvalidArgumentsException('Password does not match.')
43 return generate_auth_token(user)
44
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/backend/medtagger/api/auth/business.py b/backend/medtagger/api/auth/business.py
--- a/backend/medtagger/api/auth/business.py
+++ b/backend/medtagger/api/auth/business.py
@@ -17,7 +17,7 @@
"""
user = UsersRepository.get_user_by_email(email)
if user:
- raise InvalidArgumentsException('User with this email already exist')
+ raise InvalidArgumentsException('User with this email already exists')
password_hash = hash_password(password)
new_user = User(email, password_hash, first_name, last_name)
role = RolesRepository.get_role_with_name('volunteer')
|
{"golden_diff": "diff --git a/backend/medtagger/api/auth/business.py b/backend/medtagger/api/auth/business.py\n--- a/backend/medtagger/api/auth/business.py\n+++ b/backend/medtagger/api/auth/business.py\n@@ -17,7 +17,7 @@\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if user:\n- raise InvalidArgumentsException('User with this email already exist')\n+ raise InvalidArgumentsException('User with this email already exists')\n password_hash = hash_password(password)\n new_user = User(email, password_hash, first_name, last_name)\n role = RolesRepository.get_role_with_name('volunteer')\n", "issue": "Error indicator when logging in or registering went wrong\n## Current Behaviour\r\n - currently, only error icon is displayed when something went wrong during logging in or registering new account\r\n\r\n## Expected Behaviour \r\n - an error message should be displayed next to the error icon, so that user knows what went wrong\r\n\n", "before_files": [{"content": "\"\"\"Module responsible for business logic in all Auth endpoint.\"\"\"\nfrom medtagger.api import InvalidArgumentsException\nfrom medtagger.api.security import hash_password, verify_user_password, generate_auth_token\nfrom medtagger.database.models import User\nfrom medtagger.repositories import roles as RolesRepository, users as UsersRepository\n\n\ndef create_user(email: str, password: str, first_name: str, last_name: str) -> int:\n \"\"\"Create user with the given user information. Password is being hashed.\n\n :param email: user email in string format\n :param password: user password in string format\n :param first_name: user first name in string format\n :param last_name: user last name in string format\n\n :return: id of the new user\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if user:\n raise InvalidArgumentsException('User with this email already exist')\n password_hash = hash_password(password)\n new_user = User(email, password_hash, first_name, last_name)\n role = RolesRepository.get_role_with_name('volunteer')\n if not role:\n raise InvalidArgumentsException('Role does not exist.')\n new_user.roles.append(role)\n return UsersRepository.add_new_user(new_user)\n\n\ndef sign_in_user(email: str, password: str) -> str:\n \"\"\"Sign in user using given username and password.\n\n :param email: user email in string format\n :param password: user password in string format\n\n :return: authentication token\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if not user:\n raise InvalidArgumentsException('User does not exist.')\n if not verify_user_password(user, password):\n raise InvalidArgumentsException('Password does not match.')\n return generate_auth_token(user)\n", "path": "backend/medtagger/api/auth/business.py"}], "after_files": [{"content": "\"\"\"Module responsible for business logic in all Auth endpoint.\"\"\"\nfrom medtagger.api import InvalidArgumentsException\nfrom medtagger.api.security import hash_password, verify_user_password, generate_auth_token\nfrom medtagger.database.models import User\nfrom medtagger.repositories import roles as RolesRepository, users as UsersRepository\n\n\ndef create_user(email: str, password: str, first_name: str, last_name: str) -> int:\n \"\"\"Create user with the given user information. 
Password is being hashed.\n\n :param email: user email in string format\n :param password: user password in string format\n :param first_name: user first name in string format\n :param last_name: user last name in string format\n\n :return: id of the new user\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if user:\n raise InvalidArgumentsException('User with this email already exists')\n password_hash = hash_password(password)\n new_user = User(email, password_hash, first_name, last_name)\n role = RolesRepository.get_role_with_name('volunteer')\n if not role:\n raise InvalidArgumentsException('Role does not exist.')\n new_user.roles.append(role)\n return UsersRepository.add_new_user(new_user)\n\n\ndef sign_in_user(email: str, password: str) -> str:\n \"\"\"Sign in user using given username and password.\n\n :param email: user email in string format\n :param password: user password in string format\n\n :return: authentication token\n \"\"\"\n user = UsersRepository.get_user_by_email(email)\n if not user:\n raise InvalidArgumentsException('User does not exist.')\n if not verify_user_password(user, password):\n raise InvalidArgumentsException('Password does not match.')\n return generate_auth_token(user)\n", "path": "backend/medtagger/api/auth/business.py"}]}
| 783 | 142 |
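The MedTagger record above changes only the user-facing message (`'exist'` → `'exists'`); the control flow of `create_user` is unchanged. A stripped-down sketch of that validate-then-create flow is shown below; `InMemoryUsers` and `InvalidArguments` are stand-ins for illustration, not MedTagger classes.

```python
# Minimal sketch of the create_user validation flow from the record above.
class InvalidArguments(Exception):
    pass


class InMemoryUsers:
    def __init__(self):
        self._by_email = {}

    def get_user_by_email(self, email):
        return self._by_email.get(email)

    def add_new_user(self, email):
        self._by_email[email] = {"id": len(self._by_email) + 1, "email": email}
        return self._by_email[email]["id"]


def create_user(repo, email):
    # Reject duplicates with a message the frontend can show next to the error icon.
    if repo.get_user_by_email(email):
        raise InvalidArguments("User with this email already exists")
    return repo.add_new_user(email)


repo = InMemoryUsers()
print(create_user(repo, "a@example.com"))   # -> 1
try:
    create_user(repo, "a@example.com")
except InvalidArguments as exc:
    print(exc)                              # -> User with this email already exists
```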
gh_patches_debug_40450
|
rasdani/github-patches
|
git_diff
|
obspy__obspy-2950
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
GCF read fails on windows (recent python + numpy 1.22)
https://tests.obspy.org/115141/ & https://tests.obspy.org/115136/ on two independent machines show the error
I suspect it's numpy-dtype related:
```
Traceback (most recent call last):
File "C:\Miniconda3\envs\test\lib\site-packages\numpy\core\fromnumeric.py", line 57, in _wrapfunc
return bound(*args, **kwds)
TypeError: the resolved dtypes are not compatible with add.accumulate. Resolved (dtype('int32'), dtype('int32'), dtype('int32'))
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "D:\a\obspy\obspy\obspy\io\gcf\tests\test_core.py", line 72, in test_read_via_module
st = _read_gcf(filename)
File "D:\a\obspy\obspy\obspy\io\gcf\core.py", line 89, in _read_gcf
hd = libgcf.read(f, **kwargs)
File "D:\a\obspy\obspy\obspy\io\gcf\libgcf.py", line 167, in read
return read_data_block(f, headonly=False, **kwargs)
File "D:\a\obspy\obspy\obspy\io\gcf\libgcf.py", line 144, in read_data_block
data = (fic + np.cumsum(data)).astype('i4')
File "<__array_function__ internals>", line 180, in cumsum
File "C:\Miniconda3\envs\test\lib\site-packages\numpy\core\fromnumeric.py", line 2569, in cumsum
return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
File "C:\Miniconda3\envs\test\lib\site-packages\numpy\core\fromnumeric.py", line 66, in _wrapfunc
return _wrapit(obj, method, *args, **kwds)
File "C:\Miniconda3\envs\test\lib\site-packages\numpy\core\fromnumeric.py", line 43, in _wrapit
result = getattr(asarray(obj), method)(*args, **kwds)
TypeError: the resolved dtypes are not compatible with add.accumulate. Resolved (dtype('int32'), dtype('int32'), dtype('int32'))
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `obspy/io/gcf/libgcf.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # reads Guralp Compressed Format (GCF) Files
3 # By Ran Novitsky Nof @ BSL, 2016
4 # [email protected]
5 # Based on Guralp's GCF reference (GCF-RFC-GCFR, Issue C, 2011-01-05)
6 # more details available from: http://www.guralp.com/apps/ok?doc=GCF_Intro
7 # last access: June, 2016
8 import numpy as np
9
10 from obspy import UTCDateTime
11
12 SPS_D = { # Table 3.1: special sample rates
13 157: 0.1,
14 161: 0.125,
15 162: 0.2,
16 164: 0.25,
17 167: 0.5,
18 171: 400,
19 174: 500,
20 176: 1000,
21 179: 2000,
22 181: 4000}
23 TIME_OFFSETS_D = { # Table 3.1: Time fractional offset denominator
24 171: 8.,
25 174: 2.,
26 176: 4.,
27 179: 8.,
28 181: 16.}
29 COMPRESSION_D = { # Table 3.2: format field to data type
30 1: '>i4',
31 2: '>i2',
32 4: '>i1'}
33
34
35 def is_gcf(f):
36 """
37 Test if file is GCF by reading at least 1 data block
38 """
39 header, data = read_data_block(f)
40
41
42 def decode36(data):
43 """
44 Converts an integer into a base36 string.
45 """
46 # http://geophysics.eas.gatech.edu/GTEQ/Scream4.4/Decoding_Base_36_numbers_C.htm
47 s = ''
48 while data:
49 imed = data % 36
50 if imed > 9:
51 pos = imed - 10 + ord('A')
52 else:
53 pos = imed + ord('0')
54 c = chr(pos)
55 s = c + s
56 data = data // 36
57 return s
58
59
60 def decode_date_time(data):
61 """
62 Decode date and time field.
63
64 The date code is a 32 bit value specifying the start time of the block.
65 Bits 0-16 contain the number of seconds since midnight,
66 and bits 17-31 the number of days since 17th November 1989.
67 """
68 # prevent numpy array
69 days = int(data >> 17)
70 secs = int(data & 0x1FFFF)
71 starttime = UTCDateTime('1989-11-17') + days * 86400 + secs
72 return starttime
73
74
75 def read_data_block(f, headonly=False, channel_prefix="HH", **kwargs):
76 """
77 Read one data block from GCF file.
78
79 more details can be found here:
80 http://geophysics.eas.gatech.edu/GTEQ/Scream4.4/GCF_Specification.htm
81 f - file object to read from
82 if skipData is True, Only header is returned.
83 if not a data block (SPS=0) - returns None.
84 """
85 # get ID
86 sysid = f.read(4)
87 if not sysid:
88 raise EOFError # got to EOF
89 sysid = np.frombuffer(sysid, count=1, dtype='>u4')
90 if sysid >> 31 & 0b1 > 0:
91 sysid = (sysid << 6) >> 6
92 if isinstance(sysid, np.ndarray) and sysid.shape == (1,):
93 sysid = sysid[0]
94 else:
95 raise ValueError('sysid should be a single element np.ndarray')
96 sysid = decode36(sysid)
97 # get Stream ID
98 stid = np.frombuffer(f.read(4), count=1, dtype='>u4')
99 if isinstance(stid, np.ndarray) and stid.shape == (1,):
100 stid = stid[0]
101 else:
102 raise ValueError('stid should be a single element np.ndarray')
103 stid = decode36(stid)
104 # get Date & Time
105 data = np.frombuffer(f.read(4), count=1, dtype='>u4')
106 starttime = decode_date_time(data)
107 # get data format
108 # get reserved, SPS, data type compression,
109 # number of 32bit records (num_records)
110 reserved, sps, compress, num_records = np.frombuffer(f.read(4), count=4,
111 dtype='>u1')
112 compression = compress & 0b00000111 # get compression code
113 t_offset = compress >> 4 # get time offset
114 if t_offset > 0:
115 starttime = starttime + t_offset / TIME_OFFSETS_D[sps]
116 if sps in SPS_D:
117 sps = SPS_D[sps] # get special SPS value if needed
118 if not sps:
119 f.seek(num_records * 4, 1) # skip if not a data block
120 if 1008 - num_records * 4 > 0:
121 # keep skipping to get 1008 record
122 f.seek(1008 - num_records * 4, 1)
123 return None
124 npts = num_records * compression # number of samples
125 header = {}
126 header['starttime'] = starttime
127 header['station'] = stid[:-2]
128 header['channel'] = (channel_prefix[:2] + stid[-2]).upper()
129 header['sampling_rate'] = float(sps)
130 header['npts'] = npts
131 if headonly:
132 f.seek(4 * (num_records + 2), 1) # skip data part (inc. FIC and RIC)
133 # skip to end of block if only partly filled with data
134 if 1000 - num_records * 4 > 0:
135 f.seek(1000 - num_records * 4, 1)
136 return header
137 else:
138 # get FIC
139 fic = np.frombuffer(f.read(4), count=1, dtype='>i4')
140 # get incremental data
141 data = np.frombuffer(f.read(4 * num_records), count=npts,
142 dtype=COMPRESSION_D[compression])
143 # construct time series
144 data = (fic + np.cumsum(data)).astype('i4')
145 # get RIC
146 ric = np.frombuffer(f.read(4), count=1, dtype='>i4')
147 # skip to end of block if only partly filled with data
148 if 1000 - num_records * 4 > 0:
149 f.seek(1000 - num_records * 4, 1)
150 # verify last data sample matches RIC
151 if not data[-1] == ric:
152 raise ValueError("Last sample mismatch with RIC")
153 return header, data
154
155
156 def read_header(f, **kwargs):
157 """
158 Reads header only from GCF file.
159 """
160 return read_data_block(f, headonly=True, **kwargs)
161
162
163 def read(f, **kwargs):
164 """
165 Reads header and data from GCF file.
166 """
167 return read_data_block(f, headonly=False, **kwargs)
168
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/obspy/io/gcf/libgcf.py b/obspy/io/gcf/libgcf.py
--- a/obspy/io/gcf/libgcf.py
+++ b/obspy/io/gcf/libgcf.py
@@ -5,6 +5,8 @@
# Based on Guralp's GCF reference (GCF-RFC-GCFR, Issue C, 2011-01-05)
# more details available from: http://www.guralp.com/apps/ok?doc=GCF_Intro
# last access: June, 2016
+import struct
+
import numpy as np
from obspy import UTCDateTime
@@ -27,9 +29,10 @@
179: 8.,
181: 16.}
COMPRESSION_D = { # Table 3.2: format field to data type
- 1: '>i4',
- 2: '>i2',
- 4: '>i1'}
+ 1: 'i', # 4 bytes
+ 2: 'h', # 2 bytes
+ 4: 'b', # 1 byte
+}
def is_gcf(f):
@@ -86,29 +89,20 @@
sysid = f.read(4)
if not sysid:
raise EOFError # got to EOF
- sysid = np.frombuffer(sysid, count=1, dtype='>u4')
+ sysid, = struct.unpack('>I', sysid)
if sysid >> 31 & 0b1 > 0:
sysid = (sysid << 6) >> 6
- if isinstance(sysid, np.ndarray) and sysid.shape == (1,):
- sysid = sysid[0]
- else:
- raise ValueError('sysid should be a single element np.ndarray')
sysid = decode36(sysid)
# get Stream ID
- stid = np.frombuffer(f.read(4), count=1, dtype='>u4')
- if isinstance(stid, np.ndarray) and stid.shape == (1,):
- stid = stid[0]
- else:
- raise ValueError('stid should be a single element np.ndarray')
+ stid, = struct.unpack('>I', f.read(4))
stid = decode36(stid)
# get Date & Time
- data = np.frombuffer(f.read(4), count=1, dtype='>u4')
+ data, = struct.unpack('>I', f.read(4))
starttime = decode_date_time(data)
# get data format
# get reserved, SPS, data type compression,
# number of 32bit records (num_records)
- reserved, sps, compress, num_records = np.frombuffer(f.read(4), count=4,
- dtype='>u1')
+ reserved, sps, compress, num_records = struct.unpack('>4B', f.read(4))
compression = compress & 0b00000111 # get compression code
t_offset = compress >> 4 # get time offset
if t_offset > 0:
@@ -136,14 +130,14 @@
return header
else:
# get FIC
- fic = np.frombuffer(f.read(4), count=1, dtype='>i4')
+ fic, = struct.unpack('>i', f.read(4))
# get incremental data
- data = np.frombuffer(f.read(4 * num_records), count=npts,
- dtype=COMPRESSION_D[compression])
+ data = struct.unpack(f'>{npts}{COMPRESSION_D[compression]}',
+ f.read(4 * num_records))
# construct time series
data = (fic + np.cumsum(data)).astype('i4')
# get RIC
- ric = np.frombuffer(f.read(4), count=1, dtype='>i4')
+ ric, = struct.unpack('>i', f.read(4))
# skip to end of block if only partly filled with data
if 1000 - num_records * 4 > 0:
f.seek(1000 - num_records * 4, 1)
|
{"golden_diff": "diff --git a/obspy/io/gcf/libgcf.py b/obspy/io/gcf/libgcf.py\n--- a/obspy/io/gcf/libgcf.py\n+++ b/obspy/io/gcf/libgcf.py\n@@ -5,6 +5,8 @@\n # Based on Guralp's GCF reference (GCF-RFC-GCFR, Issue C, 2011-01-05)\n # more details available from: http://www.guralp.com/apps/ok?doc=GCF_Intro\n # last access: June, 2016\n+import struct\n+\n import numpy as np\n \n from obspy import UTCDateTime\n@@ -27,9 +29,10 @@\n 179: 8.,\n 181: 16.}\n COMPRESSION_D = { # Table 3.2: format field to data type\n- 1: '>i4',\n- 2: '>i2',\n- 4: '>i1'}\n+ 1: 'i', # 4 bytes\n+ 2: 'h', # 2 bytes\n+ 4: 'b', # 1 byte\n+}\n \n \n def is_gcf(f):\n@@ -86,29 +89,20 @@\n sysid = f.read(4)\n if not sysid:\n raise EOFError # got to EOF\n- sysid = np.frombuffer(sysid, count=1, dtype='>u4')\n+ sysid, = struct.unpack('>I', sysid)\n if sysid >> 31 & 0b1 > 0:\n sysid = (sysid << 6) >> 6\n- if isinstance(sysid, np.ndarray) and sysid.shape == (1,):\n- sysid = sysid[0]\n- else:\n- raise ValueError('sysid should be a single element np.ndarray')\n sysid = decode36(sysid)\n # get Stream ID\n- stid = np.frombuffer(f.read(4), count=1, dtype='>u4')\n- if isinstance(stid, np.ndarray) and stid.shape == (1,):\n- stid = stid[0]\n- else:\n- raise ValueError('stid should be a single element np.ndarray')\n+ stid, = struct.unpack('>I', f.read(4))\n stid = decode36(stid)\n # get Date & Time\n- data = np.frombuffer(f.read(4), count=1, dtype='>u4')\n+ data, = struct.unpack('>I', f.read(4))\n starttime = decode_date_time(data)\n # get data format\n # get reserved, SPS, data type compression,\n # number of 32bit records (num_records)\n- reserved, sps, compress, num_records = np.frombuffer(f.read(4), count=4,\n- dtype='>u1')\n+ reserved, sps, compress, num_records = struct.unpack('>4B', f.read(4))\n compression = compress & 0b00000111 # get compression code\n t_offset = compress >> 4 # get time offset\n if t_offset > 0:\n@@ -136,14 +130,14 @@\n return header\n else:\n # get FIC\n- fic = np.frombuffer(f.read(4), count=1, dtype='>i4')\n+ fic, = struct.unpack('>i', f.read(4))\n # get incremental data\n- data = np.frombuffer(f.read(4 * num_records), count=npts,\n- dtype=COMPRESSION_D[compression])\n+ data = struct.unpack(f'>{npts}{COMPRESSION_D[compression]}',\n+ f.read(4 * num_records))\n # construct time series\n data = (fic + np.cumsum(data)).astype('i4')\n # get RIC\n- ric = np.frombuffer(f.read(4), count=1, dtype='>i4')\n+ ric, = struct.unpack('>i', f.read(4))\n # skip to end of block if only partly filled with data\n if 1000 - num_records * 4 > 0:\n f.seek(1000 - num_records * 4, 1)\n", "issue": "GCF read fails on windows (recent python + numpy 1.22)\nhttps://tests.obspy.org/115141/ & https://tests.obspy.org/115136/ on two independent machines show the error\r\n\r\nI suspect it's numpy-dtype related:\r\n\r\n```\r\nTraceback (most recent call last):\r\nFile \"C:\\Miniconda3\\envs\\test\\lib\\site-packages\\numpy\\core\\fromnumeric.py\", line 57, in _wrapfunc\r\nreturn bound(*args, **kwds)\r\nTypeError: the resolved dtypes are not compatible with add.accumulate. 
Resolved (dtype('int32'), dtype('int32'), dtype('int32'))\r\nDuring handling of the above exception, another exception occurred:\r\nTraceback (most recent call last):\r\nFile \"D:\\a\\obspy\\obspy\\obspy\\io\\gcf\\tests\\test_core.py\", line 72, in test_read_via_module\r\nst = _read_gcf(filename)\r\nFile \"D:\\a\\obspy\\obspy\\obspy\\io\\gcf\\core.py\", line 89, in _read_gcf\r\nhd = libgcf.read(f, **kwargs)\r\nFile \"D:\\a\\obspy\\obspy\\obspy\\io\\gcf\\libgcf.py\", line 167, in read\r\nreturn read_data_block(f, headonly=False, **kwargs)\r\nFile \"D:\\a\\obspy\\obspy\\obspy\\io\\gcf\\libgcf.py\", line 144, in read_data_block\r\ndata = (fic + np.cumsum(data)).astype('i4')\r\nFile \"<__array_function__ internals>\", line 180, in cumsum\r\nFile \"C:\\Miniconda3\\envs\\test\\lib\\site-packages\\numpy\\core\\fromnumeric.py\", line 2569, in cumsum\r\nreturn _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)\r\nFile \"C:\\Miniconda3\\envs\\test\\lib\\site-packages\\numpy\\core\\fromnumeric.py\", line 66, in _wrapfunc\r\nreturn _wrapit(obj, method, *args, **kwds)\r\nFile \"C:\\Miniconda3\\envs\\test\\lib\\site-packages\\numpy\\core\\fromnumeric.py\", line 43, in _wrapit\r\nresult = getattr(asarray(obj), method)(*args, **kwds)\r\nTypeError: the resolved dtypes are not compatible with add.accumulate. Resolved (dtype('int32'), dtype('int32'), dtype('int32'))\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# reads Guralp Compressed Format (GCF) Files\n# By Ran Novitsky Nof @ BSL, 2016\n# [email protected]\n# Based on Guralp's GCF reference (GCF-RFC-GCFR, Issue C, 2011-01-05)\n# more details available from: http://www.guralp.com/apps/ok?doc=GCF_Intro\n# last access: June, 2016\nimport numpy as np\n\nfrom obspy import UTCDateTime\n\nSPS_D = { # Table 3.1: special sample rates\n 157: 0.1,\n 161: 0.125,\n 162: 0.2,\n 164: 0.25,\n 167: 0.5,\n 171: 400,\n 174: 500,\n 176: 1000,\n 179: 2000,\n 181: 4000}\nTIME_OFFSETS_D = { # Table 3.1: Time fractional offset denominator\n 171: 8.,\n 174: 2.,\n 176: 4.,\n 179: 8.,\n 181: 16.}\nCOMPRESSION_D = { # Table 3.2: format field to data type\n 1: '>i4',\n 2: '>i2',\n 4: '>i1'}\n\n\ndef is_gcf(f):\n \"\"\"\n Test if file is GCF by reading at least 1 data block\n \"\"\"\n header, data = read_data_block(f)\n\n\ndef decode36(data):\n \"\"\"\n Converts an integer into a base36 string.\n \"\"\"\n # http://geophysics.eas.gatech.edu/GTEQ/Scream4.4/Decoding_Base_36_numbers_C.htm\n s = ''\n while data:\n imed = data % 36\n if imed > 9:\n pos = imed - 10 + ord('A')\n else:\n pos = imed + ord('0')\n c = chr(pos)\n s = c + s\n data = data // 36\n return s\n\n\ndef decode_date_time(data):\n \"\"\"\n Decode date and time field.\n\n The date code is a 32 bit value specifying the start time of the block.\n Bits 0-16 contain the number of seconds since midnight,\n and bits 17-31 the number of days since 17th November 1989.\n \"\"\"\n # prevent numpy array\n days = int(data >> 17)\n secs = int(data & 0x1FFFF)\n starttime = UTCDateTime('1989-11-17') + days * 86400 + secs\n return starttime\n\n\ndef read_data_block(f, headonly=False, channel_prefix=\"HH\", **kwargs):\n \"\"\"\n Read one data block from GCF file.\n\n more details can be found here:\n http://geophysics.eas.gatech.edu/GTEQ/Scream4.4/GCF_Specification.htm\n f - file object to read from\n if skipData is True, Only header is returned.\n if not a data block (SPS=0) - returns None.\n \"\"\"\n # get ID\n sysid = f.read(4)\n if not sysid:\n raise EOFError # got to EOF\n sysid = np.frombuffer(sysid, 
count=1, dtype='>u4')\n if sysid >> 31 & 0b1 > 0:\n sysid = (sysid << 6) >> 6\n if isinstance(sysid, np.ndarray) and sysid.shape == (1,):\n sysid = sysid[0]\n else:\n raise ValueError('sysid should be a single element np.ndarray')\n sysid = decode36(sysid)\n # get Stream ID\n stid = np.frombuffer(f.read(4), count=1, dtype='>u4')\n if isinstance(stid, np.ndarray) and stid.shape == (1,):\n stid = stid[0]\n else:\n raise ValueError('stid should be a single element np.ndarray')\n stid = decode36(stid)\n # get Date & Time\n data = np.frombuffer(f.read(4), count=1, dtype='>u4')\n starttime = decode_date_time(data)\n # get data format\n # get reserved, SPS, data type compression,\n # number of 32bit records (num_records)\n reserved, sps, compress, num_records = np.frombuffer(f.read(4), count=4,\n dtype='>u1')\n compression = compress & 0b00000111 # get compression code\n t_offset = compress >> 4 # get time offset\n if t_offset > 0:\n starttime = starttime + t_offset / TIME_OFFSETS_D[sps]\n if sps in SPS_D:\n sps = SPS_D[sps] # get special SPS value if needed\n if not sps:\n f.seek(num_records * 4, 1) # skip if not a data block\n if 1008 - num_records * 4 > 0:\n # keep skipping to get 1008 record\n f.seek(1008 - num_records * 4, 1)\n return None\n npts = num_records * compression # number of samples\n header = {}\n header['starttime'] = starttime\n header['station'] = stid[:-2]\n header['channel'] = (channel_prefix[:2] + stid[-2]).upper()\n header['sampling_rate'] = float(sps)\n header['npts'] = npts\n if headonly:\n f.seek(4 * (num_records + 2), 1) # skip data part (inc. FIC and RIC)\n # skip to end of block if only partly filled with data\n if 1000 - num_records * 4 > 0:\n f.seek(1000 - num_records * 4, 1)\n return header\n else:\n # get FIC\n fic = np.frombuffer(f.read(4), count=1, dtype='>i4')\n # get incremental data\n data = np.frombuffer(f.read(4 * num_records), count=npts,\n dtype=COMPRESSION_D[compression])\n # construct time series\n data = (fic + np.cumsum(data)).astype('i4')\n # get RIC\n ric = np.frombuffer(f.read(4), count=1, dtype='>i4')\n # skip to end of block if only partly filled with data\n if 1000 - num_records * 4 > 0:\n f.seek(1000 - num_records * 4, 1)\n # verify last data sample matches RIC\n if not data[-1] == ric:\n raise ValueError(\"Last sample mismatch with RIC\")\n return header, data\n\n\ndef read_header(f, **kwargs):\n \"\"\"\n Reads header only from GCF file.\n \"\"\"\n return read_data_block(f, headonly=True, **kwargs)\n\n\ndef read(f, **kwargs):\n \"\"\"\n Reads header and data from GCF file.\n \"\"\"\n return read_data_block(f, headonly=False, **kwargs)\n", "path": "obspy/io/gcf/libgcf.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# reads Guralp Compressed Format (GCF) Files\n# By Ran Novitsky Nof @ BSL, 2016\n# [email protected]\n# Based on Guralp's GCF reference (GCF-RFC-GCFR, Issue C, 2011-01-05)\n# more details available from: http://www.guralp.com/apps/ok?doc=GCF_Intro\n# last access: June, 2016\nimport struct\n\nimport numpy as np\n\nfrom obspy import UTCDateTime\n\nSPS_D = { # Table 3.1: special sample rates\n 157: 0.1,\n 161: 0.125,\n 162: 0.2,\n 164: 0.25,\n 167: 0.5,\n 171: 400,\n 174: 500,\n 176: 1000,\n 179: 2000,\n 181: 4000}\nTIME_OFFSETS_D = { # Table 3.1: Time fractional offset denominator\n 171: 8.,\n 174: 2.,\n 176: 4.,\n 179: 8.,\n 181: 16.}\nCOMPRESSION_D = { # Table 3.2: format field to data type\n 1: 'i', # 4 bytes\n 2: 'h', # 2 bytes\n 4: 'b', # 1 byte\n}\n\n\ndef is_gcf(f):\n \"\"\"\n Test if file is GCF by reading at 
least 1 data block\n \"\"\"\n header, data = read_data_block(f)\n\n\ndef decode36(data):\n \"\"\"\n Converts an integer into a base36 string.\n \"\"\"\n # http://geophysics.eas.gatech.edu/GTEQ/Scream4.4/Decoding_Base_36_numbers_C.htm\n s = ''\n while data:\n imed = data % 36\n if imed > 9:\n pos = imed - 10 + ord('A')\n else:\n pos = imed + ord('0')\n c = chr(pos)\n s = c + s\n data = data // 36\n return s\n\n\ndef decode_date_time(data):\n \"\"\"\n Decode date and time field.\n\n The date code is a 32 bit value specifying the start time of the block.\n Bits 0-16 contain the number of seconds since midnight,\n and bits 17-31 the number of days since 17th November 1989.\n \"\"\"\n # prevent numpy array\n days = int(data >> 17)\n secs = int(data & 0x1FFFF)\n starttime = UTCDateTime('1989-11-17') + days * 86400 + secs\n return starttime\n\n\ndef read_data_block(f, headonly=False, channel_prefix=\"HH\", **kwargs):\n \"\"\"\n Read one data block from GCF file.\n\n more details can be found here:\n http://geophysics.eas.gatech.edu/GTEQ/Scream4.4/GCF_Specification.htm\n f - file object to read from\n if skipData is True, Only header is returned.\n if not a data block (SPS=0) - returns None.\n \"\"\"\n # get ID\n sysid = f.read(4)\n if not sysid:\n raise EOFError # got to EOF\n sysid, = struct.unpack('>I', sysid)\n if sysid >> 31 & 0b1 > 0:\n sysid = (sysid << 6) >> 6\n sysid = decode36(sysid)\n # get Stream ID\n stid, = struct.unpack('>I', f.read(4))\n stid = decode36(stid)\n # get Date & Time\n data, = struct.unpack('>I', f.read(4))\n starttime = decode_date_time(data)\n # get data format\n # get reserved, SPS, data type compression,\n # number of 32bit records (num_records)\n reserved, sps, compress, num_records = struct.unpack('>4B', f.read(4))\n compression = compress & 0b00000111 # get compression code\n t_offset = compress >> 4 # get time offset\n if t_offset > 0:\n starttime = starttime + t_offset / TIME_OFFSETS_D[sps]\n if sps in SPS_D:\n sps = SPS_D[sps] # get special SPS value if needed\n if not sps:\n f.seek(num_records * 4, 1) # skip if not a data block\n if 1008 - num_records * 4 > 0:\n # keep skipping to get 1008 record\n f.seek(1008 - num_records * 4, 1)\n return None\n npts = num_records * compression # number of samples\n header = {}\n header['starttime'] = starttime\n header['station'] = stid[:-2]\n header['channel'] = (channel_prefix[:2] + stid[-2]).upper()\n header['sampling_rate'] = float(sps)\n header['npts'] = npts\n if headonly:\n f.seek(4 * (num_records + 2), 1) # skip data part (inc. 
FIC and RIC)\n # skip to end of block if only partly filled with data\n if 1000 - num_records * 4 > 0:\n f.seek(1000 - num_records * 4, 1)\n return header\n else:\n # get FIC\n fic, = struct.unpack('>i', f.read(4))\n # get incremental data\n data = struct.unpack(f'>{npts}{COMPRESSION_D[compression]}',\n f.read(4 * num_records))\n # construct time series\n data = (fic + np.cumsum(data)).astype('i4')\n # get RIC\n ric, = struct.unpack('>i', f.read(4))\n # skip to end of block if only partly filled with data\n if 1000 - num_records * 4 > 0:\n f.seek(1000 - num_records * 4, 1)\n # verify last data sample matches RIC\n if not data[-1] == ric:\n raise ValueError(\"Last sample mismatch with RIC\")\n return header, data\n\n\ndef read_header(f, **kwargs):\n \"\"\"\n Reads header only from GCF file.\n \"\"\"\n return read_data_block(f, headonly=True, **kwargs)\n\n\ndef read(f, **kwargs):\n \"\"\"\n Reads header and data from GCF file.\n \"\"\"\n return read_data_block(f, headonly=False, **kwargs)\n", "path": "obspy/io/gcf/libgcf.py"}]}
| 2,945 | 970 |
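A minimal sketch (illustrative values only, not taken from the obspy test data) of why the patch above swaps `np.frombuffer` with big-endian dtypes for `struct.unpack`: `struct` returns plain Python integers, so the later `np.cumsum` runs on a native-endian array, which appears to sidestep the `add.accumulate` dtype-resolution failure seen on Windows with NumPy 1.22 in the traceback.

```python
import struct

import numpy as np

# Hypothetical block: four big-endian 32-bit first differences and a made-up
# forward integration constant (FIC); real values would come from a GCF file.
raw = struct.pack(">4i", 10, -2, 3, 5)
fic = 100

data = struct.unpack(">4i", raw)               # tuple of native Python ints
series = (fic + np.cumsum(data)).astype("i4")  # accumulates on a native-endian array
print(series)                                  # [110 108 111 116]
```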
gh_patches_debug_53690
|
rasdani/github-patches
|
git_diff
|
sql-machine-learning__elasticdl-2180
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Elasticdl client crashes with invalid args
```
$ elasticdl -v
Traceback (most recent call last):
File "/usr/local/bin/elasticdl", line 8, in <module>
sys.exit(main())
File "/usr/local/lib/python3.7/site-packages/elasticdl_client/main.py", line 97, in main
args, _ = parser.parse_known_args()
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/argparse.py", line 1787, in parse_known_args
namespace, args = self._parse_known_args(args, namespace)
File "/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/argparse.py", line 2022, in _parse_known_args
', '.join(required_actions))
TypeError: sequence item 0: expected str instance, NoneType found
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `elasticdl_client/main.py`
Content:
```
1 # Copyright 2020 The ElasticDL Authors. All rights reserved.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at
5 #
6 # http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 import argparse
15 import sys
16
17 from elasticdl_client.api import (
18 build_zoo,
19 evaluate,
20 init_zoo,
21 predict,
22 push_zoo,
23 train,
24 )
25 from elasticdl_client.common import args
26
27
28 def build_argument_parser():
29 parser = argparse.ArgumentParser()
30 subparsers = parser.add_subparsers()
31 subparsers.required = True
32
33 # Initialize the parser for the `elasticdl zoo` commands
34 zoo_parser = subparsers.add_parser(
35 "zoo",
36 help="Initialize | Build | Push a docker image for the model zoo.",
37 )
38 zoo_subparsers = zoo_parser.add_subparsers()
39 zoo_subparsers.required = True
40
41 # elasticdl zoo init
42 zoo_init_parser = zoo_subparsers.add_parser(
43 "init", help="Initialize the model zoo."
44 )
45 zoo_init_parser.set_defaults(func=init_zoo)
46 args.add_zoo_init_params(zoo_init_parser)
47
48 # elasticdl zoo build
49 zoo_build_parser = zoo_subparsers.add_parser(
50 "build", help="Build a docker image for the model zoo."
51 )
52 zoo_build_parser.set_defaults(func=build_zoo)
53 args.add_zoo_build_params(zoo_build_parser)
54
55 # elasticdl zoo push
56 zoo_push_parser = zoo_subparsers.add_parser(
57 "push",
58 help="Push the docker image to a remote registry for the distributed"
59 "ElasticDL job.",
60 )
61 zoo_push_parser.set_defaults(func=push_zoo)
62 args.add_zoo_push_params(zoo_push_parser)
63
64 # elasticdl train
65 train_parser = subparsers.add_parser(
66 "train", help="Submit a ElasticDL distributed training job"
67 )
68 train_parser.set_defaults(func=train)
69 args.add_common_params(train_parser)
70 args.add_train_params(train_parser)
71
72 # elasticdl evaluate
73 evaluate_parser = subparsers.add_parser(
74 "evaluate", help="Submit a ElasticDL distributed evaluation job"
75 )
76 evaluate_parser.set_defaults(func=evaluate)
77 args.add_common_params(evaluate_parser)
78 args.add_evaluate_params(evaluate_parser)
79
80 # elasticdl predict
81 predict_parser = subparsers.add_parser(
82 "predict", help="Submit a ElasticDL distributed prediction job"
83 )
84 predict_parser.set_defaults(func=predict)
85 args.add_common_params(predict_parser)
86 args.add_predict_params(predict_parser)
87
88 return parser
89
90
91 def main():
92 parser = build_argument_parser()
93 if len(sys.argv) == 1:
94 parser.print_help(sys.stderr)
95 sys.exit(1)
96
97 args, _ = parser.parse_known_args()
98 args.func(args)
99
100
101 if __name__ == "__main__":
102 main()
103
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/elasticdl_client/main.py b/elasticdl_client/main.py
--- a/elasticdl_client/main.py
+++ b/elasticdl_client/main.py
@@ -94,7 +94,12 @@
parser.print_help(sys.stderr)
sys.exit(1)
- args, _ = parser.parse_known_args()
+ try:
+ args, _ = parser.parse_known_args()
+ except TypeError:
+ parser.print_help(sys.stderr)
+ sys.exit(1)
+
args.func(args)
|
{"golden_diff": "diff --git a/elasticdl_client/main.py b/elasticdl_client/main.py\n--- a/elasticdl_client/main.py\n+++ b/elasticdl_client/main.py\n@@ -94,7 +94,12 @@\n parser.print_help(sys.stderr)\n sys.exit(1)\n \n- args, _ = parser.parse_known_args()\n+ try:\n+ args, _ = parser.parse_known_args()\n+ except TypeError:\n+ parser.print_help(sys.stderr)\n+ sys.exit(1)\n+\n args.func(args)\n", "issue": "Elasticdl client crashes with invalid args\n```\r\n$ elasticdl -v\r\nTraceback (most recent call last):\r\n File \"/usr/local/bin/elasticdl\", line 8, in <module>\r\n sys.exit(main())\r\n File \"/usr/local/lib/python3.7/site-packages/elasticdl_client/main.py\", line 97, in main\r\n args, _ = parser.parse_known_args()\r\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/argparse.py\", line 1787, in parse_known_args\r\n namespace, args = self._parse_known_args(args, namespace)\r\n File \"/usr/local/Cellar/python/3.7.7/Frameworks/Python.framework/Versions/3.7/lib/python3.7/argparse.py\", line 2022, in _parse_known_args\r\n ', '.join(required_actions))\r\nTypeError: sequence item 0: expected str instance, NoneType found\r\n```\n", "before_files": [{"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport sys\n\nfrom elasticdl_client.api import (\n build_zoo,\n evaluate,\n init_zoo,\n predict,\n push_zoo,\n train,\n)\nfrom elasticdl_client.common import args\n\n\ndef build_argument_parser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n subparsers.required = True\n\n # Initialize the parser for the `elasticdl zoo` commands\n zoo_parser = subparsers.add_parser(\n \"zoo\",\n help=\"Initialize | Build | Push a docker image for the model zoo.\",\n )\n zoo_subparsers = zoo_parser.add_subparsers()\n zoo_subparsers.required = True\n\n # elasticdl zoo init\n zoo_init_parser = zoo_subparsers.add_parser(\n \"init\", help=\"Initialize the model zoo.\"\n )\n zoo_init_parser.set_defaults(func=init_zoo)\n args.add_zoo_init_params(zoo_init_parser)\n\n # elasticdl zoo build\n zoo_build_parser = zoo_subparsers.add_parser(\n \"build\", help=\"Build a docker image for the model zoo.\"\n )\n zoo_build_parser.set_defaults(func=build_zoo)\n args.add_zoo_build_params(zoo_build_parser)\n\n # elasticdl zoo push\n zoo_push_parser = zoo_subparsers.add_parser(\n \"push\",\n help=\"Push the docker image to a remote registry for the distributed\"\n \"ElasticDL job.\",\n )\n zoo_push_parser.set_defaults(func=push_zoo)\n args.add_zoo_push_params(zoo_push_parser)\n\n # elasticdl train\n train_parser = subparsers.add_parser(\n \"train\", help=\"Submit a ElasticDL distributed training job\"\n )\n train_parser.set_defaults(func=train)\n args.add_common_params(train_parser)\n args.add_train_params(train_parser)\n\n # elasticdl evaluate\n evaluate_parser = subparsers.add_parser(\n \"evaluate\", help=\"Submit a ElasticDL distributed evaluation job\"\n )\n 
evaluate_parser.set_defaults(func=evaluate)\n args.add_common_params(evaluate_parser)\n args.add_evaluate_params(evaluate_parser)\n\n # elasticdl predict\n predict_parser = subparsers.add_parser(\n \"predict\", help=\"Submit a ElasticDL distributed prediction job\"\n )\n predict_parser.set_defaults(func=predict)\n args.add_common_params(predict_parser)\n args.add_predict_params(predict_parser)\n\n return parser\n\n\ndef main():\n parser = build_argument_parser()\n if len(sys.argv) == 1:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n args, _ = parser.parse_known_args()\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl_client/main.py"}], "after_files": [{"content": "# Copyright 2020 The ElasticDL Authors. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport sys\n\nfrom elasticdl_client.api import (\n build_zoo,\n evaluate,\n init_zoo,\n predict,\n push_zoo,\n train,\n)\nfrom elasticdl_client.common import args\n\n\ndef build_argument_parser():\n parser = argparse.ArgumentParser()\n subparsers = parser.add_subparsers()\n subparsers.required = True\n\n # Initialize the parser for the `elasticdl zoo` commands\n zoo_parser = subparsers.add_parser(\n \"zoo\",\n help=\"Initialize | Build | Push a docker image for the model zoo.\",\n )\n zoo_subparsers = zoo_parser.add_subparsers()\n zoo_subparsers.required = True\n\n # elasticdl zoo init\n zoo_init_parser = zoo_subparsers.add_parser(\n \"init\", help=\"Initialize the model zoo.\"\n )\n zoo_init_parser.set_defaults(func=init_zoo)\n args.add_zoo_init_params(zoo_init_parser)\n\n # elasticdl zoo build\n zoo_build_parser = zoo_subparsers.add_parser(\n \"build\", help=\"Build a docker image for the model zoo.\"\n )\n zoo_build_parser.set_defaults(func=build_zoo)\n args.add_zoo_build_params(zoo_build_parser)\n\n # elasticdl zoo push\n zoo_push_parser = zoo_subparsers.add_parser(\n \"push\",\n help=\"Push the docker image to a remote registry for the distributed\"\n \"ElasticDL job.\",\n )\n zoo_push_parser.set_defaults(func=push_zoo)\n args.add_zoo_push_params(zoo_push_parser)\n\n # elasticdl train\n train_parser = subparsers.add_parser(\n \"train\", help=\"Submit a ElasticDL distributed training job\"\n )\n train_parser.set_defaults(func=train)\n args.add_common_params(train_parser)\n args.add_train_params(train_parser)\n\n # elasticdl evaluate\n evaluate_parser = subparsers.add_parser(\n \"evaluate\", help=\"Submit a ElasticDL distributed evaluation job\"\n )\n evaluate_parser.set_defaults(func=evaluate)\n args.add_common_params(evaluate_parser)\n args.add_evaluate_params(evaluate_parser)\n\n # elasticdl predict\n predict_parser = subparsers.add_parser(\n \"predict\", help=\"Submit a ElasticDL distributed prediction job\"\n )\n predict_parser.set_defaults(func=predict)\n args.add_common_params(predict_parser)\n args.add_predict_params(predict_parser)\n\n return parser\n\n\ndef main():\n parser = build_argument_parser()\n if len(sys.argv) == 1:\n parser.print_help(sys.stderr)\n 
sys.exit(1)\n\n try:\n args, _ = parser.parse_known_args()\n except TypeError:\n parser.print_help(sys.stderr)\n sys.exit(1)\n\n args.func(args)\n\n\nif __name__ == \"__main__\":\n main()\n", "path": "elasticdl_client/main.py"}]}
| 1,386 | 115 |
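A minimal sketch reproducing the argparse behaviour the patch above guards against: with `subparsers.required = True` and no `dest`, parsing a stray flag such as `-v` can raise `TypeError` while argparse formats its "required arguments" message (seen on Python 3.7.7 in the traceback; newer interpreters may exit with a normal usage error instead). The `try/except` mirrors the patched `main()`; an alternative not taken by the patch would be `add_subparsers(dest="command")`, which gives argparse a name to report.

```python
import argparse
import sys

parser = argparse.ArgumentParser(prog="elasticdl")
subparsers = parser.add_subparsers()
subparsers.required = True
subparsers.add_parser("train")

try:
    args, _ = parser.parse_known_args(["-v"])  # no subcommand supplied
except TypeError:
    # Observed on Python 3.7 (see the traceback in the issue above).
    parser.print_help(sys.stderr)
    sys.exit(1)
```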
gh_patches_debug_7059
|
rasdani/github-patches
|
git_diff
|
modin-project__modin-6283
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Refactor ci.yml to reduce the amount of copy-pasting
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 from setuptools import setup, find_packages
2 import versioneer
3
4 with open("README.md", "r", encoding="utf-8") as fh:
5 long_description = fh.read()
6
7 dask_deps = ["dask>=2.22.0", "distributed>=2.22.0"]
8 ray_deps = ["ray[default]>=1.13.0", "pyarrow"]
9 unidist_deps = ["unidist[mpi]>=0.2.1"]
10 remote_deps = ["rpyc==4.1.5", "cloudpickle", "boto3"]
11 spreadsheet_deps = ["modin-spreadsheet>=0.1.0"]
12 sql_deps = ["dfsql>=0.4.2", "pyparsing<=2.4.7"]
13 all_deps = dask_deps + ray_deps + unidist_deps + remote_deps + spreadsheet_deps
14
15 # Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.
16 # This file provides the "import pandas before Ray init" feature if specific
17 # environment variable is set (see https://github.com/modin-project/modin/issues/4564).
18 cmdclass = versioneer.get_cmdclass()
19 extra_files = ["modin-autoimport-pandas.pth"]
20
21
22 class AddPthFileBuild(cmdclass["build_py"]):
23 def _get_data_files(self):
24 return (super()._get_data_files() or []) + [
25 (".", ".", self.build_lib, extra_files)
26 ]
27
28
29 class AddPthFileSDist(cmdclass["sdist"]):
30 def make_distribution(self):
31 self.filelist.extend(extra_files)
32 return super().make_distribution()
33
34
35 cmdclass["build_py"] = AddPthFileBuild
36 cmdclass["sdist"] = AddPthFileSDist
37
38 setup(
39 name="modin",
40 version=versioneer.get_version(),
41 cmdclass=cmdclass,
42 description="Modin: Make your pandas code run faster by changing one line of code.",
43 packages=find_packages(exclude=["scripts", "scripts.*"]),
44 include_package_data=True,
45 license="Apache 2",
46 url="https://github.com/modin-project/modin",
47 long_description=long_description,
48 long_description_content_type="text/markdown",
49 install_requires=[
50 "pandas>=2,<2.1",
51 "packaging",
52 "numpy>=1.18.5",
53 "fsspec",
54 "psutil",
55 ],
56 extras_require={
57 # can be installed by pip install modin[dask]
58 "dask": dask_deps,
59 "ray": ray_deps,
60 "unidist": unidist_deps,
61 "remote": remote_deps,
62 "spreadsheet": spreadsheet_deps,
63 "sql": sql_deps,
64 "all": all_deps,
65 },
66 python_requires=">=3.8",
67 )
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -5,7 +5,8 @@
long_description = fh.read()
dask_deps = ["dask>=2.22.0", "distributed>=2.22.0"]
-ray_deps = ["ray[default]>=1.13.0", "pyarrow"]
+# ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100
+ray_deps = ["ray[default]>=1.13.0,!=2.5.0", "pyarrow"]
unidist_deps = ["unidist[mpi]>=0.2.1"]
remote_deps = ["rpyc==4.1.5", "cloudpickle", "boto3"]
spreadsheet_deps = ["modin-spreadsheet>=0.1.0"]
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -5,7 +5,8 @@\n long_description = fh.read()\n \n dask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\n-ray_deps = [\"ray[default]>=1.13.0\", \"pyarrow\"]\n+# ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100\n+ray_deps = [\"ray[default]>=1.13.0,!=2.5.0\", \"pyarrow\"]\n unidist_deps = [\"unidist[mpi]>=0.2.1\"]\n remote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\n spreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\n", "issue": "Refactor ci.yml to reduce the amount of copy-pasting\n\n", "before_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\nray_deps = [\"ray[default]>=1.13.0\", \"pyarrow\"]\nunidist_deps = [\"unidist[mpi]>=0.2.1\"]\nremote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\nspreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\nsql_deps = [\"dfsql>=0.4.2\", \"pyparsing<=2.4.7\"]\nall_deps = dask_deps + ray_deps + unidist_deps + remote_deps + spreadsheet_deps\n\n# Distribute 'modin-autoimport-pandas.pth' along with binary and source distributions.\n# This file provides the \"import pandas before Ray init\" feature if specific\n# environment variable is set (see https://github.com/modin-project/modin/issues/4564).\ncmdclass = versioneer.get_cmdclass()\nextra_files = [\"modin-autoimport-pandas.pth\"]\n\n\nclass AddPthFileBuild(cmdclass[\"build_py\"]):\n def _get_data_files(self):\n return (super()._get_data_files() or []) + [\n (\".\", \".\", self.build_lib, extra_files)\n ]\n\n\nclass AddPthFileSDist(cmdclass[\"sdist\"]):\n def make_distribution(self):\n self.filelist.extend(extra_files)\n return super().make_distribution()\n\n\ncmdclass[\"build_py\"] = AddPthFileBuild\ncmdclass[\"sdist\"] = AddPthFileSDist\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=cmdclass,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(exclude=[\"scripts\", \"scripts.*\"]),\n include_package_data=True,\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\n \"pandas>=2,<2.1\",\n \"packaging\",\n \"numpy>=1.18.5\",\n \"fsspec\",\n \"psutil\",\n ],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"unidist\": unidist_deps,\n \"remote\": remote_deps,\n \"spreadsheet\": spreadsheet_deps,\n \"sql\": sql_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.8\",\n)\n", "path": "setup.py"}], "after_files": [{"content": "from setuptools import setup, find_packages\nimport versioneer\n\nwith open(\"README.md\", \"r\", encoding=\"utf-8\") as fh:\n long_description = fh.read()\n\ndask_deps = [\"dask>=2.22.0\", \"distributed>=2.22.0\"]\n# ray==2.5.0 broken: https://github.com/conda-forge/ray-packages-feedstock/issues/100\nray_deps = [\"ray[default]>=1.13.0,!=2.5.0\", \"pyarrow\"]\nunidist_deps = [\"unidist[mpi]>=0.2.1\"]\nremote_deps = [\"rpyc==4.1.5\", \"cloudpickle\", \"boto3\"]\nspreadsheet_deps = [\"modin-spreadsheet>=0.1.0\"]\nsql_deps = [\"dfsql>=0.4.2\", \"pyparsing<=2.4.7\"]\nall_deps = dask_deps + ray_deps + unidist_deps + remote_deps + spreadsheet_deps\n\n# Distribute 'modin-autoimport-pandas.pth' along 
with binary and source distributions.\n# This file provides the \"import pandas before Ray init\" feature if specific\n# environment variable is set (see https://github.com/modin-project/modin/issues/4564).\ncmdclass = versioneer.get_cmdclass()\nextra_files = [\"modin-autoimport-pandas.pth\"]\n\n\nclass AddPthFileBuild(cmdclass[\"build_py\"]):\n def _get_data_files(self):\n return (super()._get_data_files() or []) + [\n (\".\", \".\", self.build_lib, extra_files)\n ]\n\n\nclass AddPthFileSDist(cmdclass[\"sdist\"]):\n def make_distribution(self):\n self.filelist.extend(extra_files)\n return super().make_distribution()\n\n\ncmdclass[\"build_py\"] = AddPthFileBuild\ncmdclass[\"sdist\"] = AddPthFileSDist\n\nsetup(\n name=\"modin\",\n version=versioneer.get_version(),\n cmdclass=cmdclass,\n description=\"Modin: Make your pandas code run faster by changing one line of code.\",\n packages=find_packages(exclude=[\"scripts\", \"scripts.*\"]),\n include_package_data=True,\n license=\"Apache 2\",\n url=\"https://github.com/modin-project/modin\",\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n install_requires=[\n \"pandas>=2,<2.1\",\n \"packaging\",\n \"numpy>=1.18.5\",\n \"fsspec\",\n \"psutil\",\n ],\n extras_require={\n # can be installed by pip install modin[dask]\n \"dask\": dask_deps,\n \"ray\": ray_deps,\n \"unidist\": unidist_deps,\n \"remote\": remote_deps,\n \"spreadsheet\": spreadsheet_deps,\n \"sql\": sql_deps,\n \"all\": all_deps,\n },\n python_requires=\">=3.8\",\n)\n", "path": "setup.py"}]}
| 1,005 | 195 |
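The golden diff above pins the ray dependency rather than touching ci.yml; a quick, assumption-light way to sanity-check the new specifier is the `packaging` library already listed in this `setup.py`:

```python
from packaging.specifiers import SpecifierSet

ray_spec = SpecifierSet(">=1.13.0,!=2.5.0")

print("2.5.0" in ray_spec)   # False – the broken conda-forge release is excluded
print("2.5.1" in ray_spec)   # True
print("1.13.0" in ray_spec)  # True
```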
gh_patches_debug_1143
|
rasdani/github-patches
|
git_diff
|
akvo__akvo-rsr-3132
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
When logged in landing page should be "myRSR"
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `akvo/rsr/views/__init__.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Akvo RSR is covered by the GNU Affero General Public License.
4
5 See more details in the license.txt file located at the root folder of the Akvo RSR module.
6 For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
7 """
8
9 from django.core.urlresolvers import reverse
10 from django.http import HttpResponseRedirect
11
12
13 def index(request):
14 """."""
15 return HttpResponseRedirect(reverse('project-directory', args=[]))
16
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/akvo/rsr/views/__init__.py b/akvo/rsr/views/__init__.py
--- a/akvo/rsr/views/__init__.py
+++ b/akvo/rsr/views/__init__.py
@@ -11,5 +11,7 @@
def index(request):
- """."""
- return HttpResponseRedirect(reverse('project-directory', args=[]))
+ """Redirect user to project directory or My RSR."""
+
+ redirect_url = 'project-directory' if request.user.is_anonymous() else 'my_rsr'
+ return HttpResponseRedirect(reverse(redirect_url, args=[]))
|
{"golden_diff": "diff --git a/akvo/rsr/views/__init__.py b/akvo/rsr/views/__init__.py\n--- a/akvo/rsr/views/__init__.py\n+++ b/akvo/rsr/views/__init__.py\n@@ -11,5 +11,7 @@\n \n \n def index(request):\n- \"\"\".\"\"\"\n- return HttpResponseRedirect(reverse('project-directory', args=[]))\n+ \"\"\"Redirect user to project directory or My RSR.\"\"\"\n+\n+ redirect_url = 'project-directory' if request.user.is_anonymous() else 'my_rsr'\n+ return HttpResponseRedirect(reverse(redirect_url, args=[]))\n", "issue": "When logged in landing page should be \"myRSR\"\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\n\n\ndef index(request):\n \"\"\".\"\"\"\n return HttpResponseRedirect(reverse('project-directory', args=[]))\n", "path": "akvo/rsr/views/__init__.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Akvo RSR is covered by the GNU Affero General Public License.\n\nSee more details in the license.txt file located at the root folder of the Akvo RSR module.\nFor additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\"\"\"\n\nfrom django.core.urlresolvers import reverse\nfrom django.http import HttpResponseRedirect\n\n\ndef index(request):\n \"\"\"Redirect user to project directory or My RSR.\"\"\"\n\n redirect_url = 'project-directory' if request.user.is_anonymous() else 'my_rsr'\n return HttpResponseRedirect(reverse(redirect_url, args=[]))\n", "path": "akvo/rsr/views/__init__.py"}]}
| 402 | 133 |
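The routing rule added by the patch above, reduced to a plain function so it can be sanity-checked outside Django (the helper name and boolean argument are illustrative, not part of the Akvo RSR code; the patch itself calls `request.user.is_anonymous()`, a method in the Django version used here):

```python
def landing_page(user_is_anonymous):
    # Mirrors the patched index view: anonymous visitors get the public
    # project directory, authenticated users land on My RSR.
    return 'project-directory' if user_is_anonymous else 'my_rsr'


assert landing_page(True) == 'project-directory'
assert landing_page(False) == 'my_rsr'
```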
gh_patches_debug_23977
|
rasdani/github-patches
|
git_diff
|
sktime__sktime-6005
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] `SARIMAX` fails when `X` is passed in `predict` but not used in `fit`
## Minimal Reproducible Example
```pycon
>>>
>>> from sktime.datasets import load_longley
>>> from sktime.forecasting.sarimax import SARIMAX
>>> from sktime.split import temporal_train_test_split
>>>
>>> y, X = load_longley()
>>>
>>> y_train, _, _, X_test = temporal_train_test_split(y, X)
>>>
>>> forecaster = SARIMAX()
>>>
>>> forecaster.fit(y_train)
SARIMAX()
>>>
>>> # works
>>> forecaster.predict(fh=[1, 2, 3, 4])
1959 66061.176439
1960 65682.034815
1961 65363.883253
1962 65096.910677
Freq: A-DEC, Name: TOTEMP, dtype: float64
>>>
>>> # fails
>>> forecaster.predict(fh=[1, 2, 3, 4], X=X_test)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/anirban/sktime-fork/sktime/forecasting/base/_base.py", line 412, in predict
y_pred = self._predict(fh=fh, X=X_inner)
File "/home/anirban/sktime-fork/sktime/forecasting/base/adapters/_statsmodels.py", line 108, in _predict
ind_drop = self._X.index
AttributeError: 'NoneType' object has no attribute 'index'
>>>
```
## Expectation
I was expecting no failures, and identical behaviour in both cases. I do get that behaviour if I try with `sktime.forecasting.arima.ARIMA`.
## Version
Operating System: Ubuntu 22.04.3 LTS (WSL)
Python: 3.10.12
Sktime: e51ec2472a
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `sktime/forecasting/base/adapters/_statsmodels.py`
Content:
```
1 # !/usr/bin/env python3 -u
2 # copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
3 """Implements adapter for statsmodels forecasters to be used in sktime framework."""
4
5 __author__ = ["mloning", "ciaran-g"]
6 __all__ = ["_StatsModelsAdapter"]
7
8 import inspect
9
10 import numpy as np
11 import pandas as pd
12
13 from sktime.forecasting.base import BaseForecaster
14 from sktime.utils.warnings import warn
15
16
17 class _StatsModelsAdapter(BaseForecaster):
18 """Base class for interfacing statsmodels forecasting algorithms."""
19
20 _fitted_param_names = ()
21 _tags = {
22 # packaging info
23 # --------------
24 "authors": ["mloning", "ciaran-g"],
25 "maintainers": ["ciaran-g"],
26 "python_dependencies": "statsmodels",
27 # estimator type
28 # --------------
29 "ignores-exogeneous-X": True,
30 "requires-fh-in-fit": False,
31 "handles-missing-data": False,
32 }
33
34 def __init__(self, random_state=None):
35 self._forecaster = None
36 self.random_state = random_state
37 self._fitted_forecaster = None
38 super().__init__()
39
40 def _fit(self, y, X, fh):
41 """Fit to training data.
42
43 Parameters
44 ----------
45 y : pd.Series
46 Target time series to which to fit the forecaster.
47 fh : int, list or np.array, optional (default=None)
48 The forecasters horizon with the steps ahead to to predict.
49 X : pd.DataFrame, optional (default=None)
50 Exogenous variables are ignored
51
52 Returns
53 -------
54 self : returns an instance of self.
55 """
56 # statsmodels does not support the pd.Int64Index as required,
57 # so we coerce them here to pd.RangeIndex
58 if isinstance(y, pd.Series) and pd.api.types.is_integer_dtype(y.index):
59 y, X = _coerce_int_to_range_index(y, X)
60 self._fit_forecaster(y, X)
61 return self
62
63 def _fit_forecaster(self, y_train, X_train=None):
64 """Log used internally in fit."""
65 raise NotImplementedError("abstract method")
66
67 def _update(self, y, X=None, update_params=True):
68 """Update used internally in update."""
69 if update_params or self.is_composite():
70 super()._update(y, X, update_params=update_params)
71 else:
72 if not hasattr(self._fitted_forecaster, "append"):
73 warn(
74 f"NotImplementedWarning: {self.__class__.__name__} "
75 f"can not accept new data when update_params=False. "
76 f"Call with update_params=True to refit with new data.",
77 obj=self,
78 )
79 else:
80 # only append unseen data to fitted forecaster
81 index_diff = y.index.difference(
82 self._fitted_forecaster.fittedvalues.index
83 )
84 if index_diff.isin(y.index).all():
85 y = y.loc[index_diff]
86 self._fitted_forecaster = self._fitted_forecaster.append(y)
87
88 def _predict(self, fh, X):
89 """Make forecasts.
90
91 Parameters
92 ----------
93 fh : ForecastingHorizon
94 The forecasters horizon with the steps ahead to to predict.
95 Default is one-step ahead forecast,
96 i.e. np.array([1])
97 X : pd.DataFrame, optional (default=None)
98 Exogenous variables are ignored.
99
100 Returns
101 -------
102 y_pred : pd.Series
103 Returns series of predicted values.
104 """
105 # statsmodels requires zero-based indexing starting at the
106 # beginning of the training series when passing integers
107 start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]
108 fh_int = fh.to_absolute_int(self._y.index[0], self.cutoff) - len(self._y)
109
110 # bug fix for evaluate function as test_plus_train indices are passed
111 # statsmodels exog must contain test indices only.
112 # For discussion see https://github.com/sktime/sktime/issues/3830
113 if X is not None:
114 ind_drop = self._X.index
115 X = X.loc[~X.index.isin(ind_drop)]
116 # Entire range of the forecast horizon is required
117 X = X.iloc[: (fh_int[-1] + 1)] # include end point
118
119 if "exog" in inspect.signature(self._forecaster.__init__).parameters.keys():
120 y_pred = self._fitted_forecaster.predict(start=start, end=end, exog=X)
121 else:
122 y_pred = self._fitted_forecaster.predict(start=start, end=end)
123
124 # statsmodels forecasts all periods from start to end of forecasting
125 # horizon, but only return given time points in forecasting horizon
126 # if fh[0] > 1 steps ahead of cutoff then make relative to `start`
127 fh_int = fh_int - fh_int[0]
128 y_pred = y_pred.iloc[fh_int]
129 # ensure that name is not added nor removed
130 # otherwise this may upset conversion to pd.DataFrame
131 y_pred.name = self._y.name
132 return y_pred
133
134 @staticmethod
135 def _extract_conf_int(prediction_results, alpha) -> pd.DataFrame:
136 """Construct confidence interval at specified `alpha` for each timestep.
137
138 Parameters
139 ----------
140 prediction_results : PredictionResults
141 results class, as returned by ``self._fitted_forecaster.get_prediction``
142 alpha : float
143 one minus nominal coverage
144
145 Returns
146 -------
147 pd.DataFrame
148 confidence intervals at each timestep
149
150 The dataframe must have at least two columns ``lower`` and ``upper``, and
151 the row indices must be integers relative to ``self.cutoff``. Order of
152 columns do not matter, and row indices must be a superset of relative
153 integer horizon of ``fh``.
154 """
155 del prediction_results, alpha # tools like ``vulture`` may complain as unused
156
157 raise NotImplementedError("abstract method")
158
159 def _predict_interval(self, fh, X, coverage):
160 """Compute/return prediction interval forecasts.
161
162 private _predict_interval containing the core logic,
163 called from predict_interval and default _predict_quantiles
164
165 Parameters
166 ----------
167 fh : guaranteed to be ForecastingHorizon
168 The forecasting horizon with the steps ahead to to predict.
169 X : optional (default=None)
170 guaranteed to be of a type in self.get_tag("X_inner_mtype")
171 Exogeneous time series to predict from.
172 coverage : float or list of float, optional (default=0.95)
173 nominal coverage(s) of predictive interval(s)
174
175 Returns
176 -------
177 pred_int : pd.DataFrame
178 Column has multi-index: first level is variable name from y in fit,
179 second level coverage fractions for which intervals were computed.
180 in the same order as in input `coverage`.
181 Third level is string "lower" or "upper", for lower/upper interval end.
182 Row index is fh, with additional (upper) levels equal to instance levels,
183 from y seen in fit, if y_inner_mtype is Panel or Hierarchical.
184 Entries are forecasts of lower/upper interval end,
185 for var in col index, at nominal coverage in second col index,
186 lower/upper depending on third col index, for the row index.
187 Upper/lower interval end forecasts are equivalent to
188 quantile forecasts at alpha = 0.5 - c/2, 0.5 + c/2 for c in coverage.
189 """
190 implements_interval_adapter = self._has_implementation_of("_extract_conf_int")
191 implements_quantiles = self._has_implementation_of("_predict_quantiles")
192
193 if not implements_interval_adapter and implements_quantiles:
194 return BaseForecaster._predict_interval(self, fh, X=X, coverage=coverage)
195
196 start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]
197 fh_int = fh.to_absolute_int(self._y.index[0], self.cutoff) - len(self._y)
198 # if fh > 1 steps ahead of cutoff
199 fh_int = fh_int - fh_int[0]
200
201 get_prediction_arguments = {"start": start, "end": end}
202
203 if hasattr(self, "random_state"):
204 get_prediction_arguments["random_state"] = self.random_state
205
206 if inspect.signature(self._fitted_forecaster.get_prediction).parameters.get(
207 "exog"
208 ):
209 get_prediction_arguments["exog"] = X
210
211 prediction_results = self._fitted_forecaster.get_prediction(
212 **get_prediction_arguments
213 )
214
215 var_names = self._get_varnames()
216 var_name = var_names[0]
217 columns = pd.MultiIndex.from_product([var_names, coverage, ["lower", "upper"]])
218 preds_index = self._extract_conf_int(prediction_results, (1 - coverage[0]))
219 preds_index = preds_index.iloc[fh_int].index
220 pred_int = pd.DataFrame(index=preds_index, columns=columns)
221
222 for c in coverage:
223 pred_statsmodels = self._extract_conf_int(prediction_results, (1 - c))
224
225 pred_int[(var_name, c, "lower")] = pred_statsmodels.iloc[fh_int]["lower"]
226 pred_int[(var_name, c, "upper")] = pred_statsmodels.iloc[fh_int]["upper"]
227
228 return pred_int
229
230 def _get_fitted_params(self):
231 """Get fitted parameters.
232
233 Returns
234 -------
235 fitted_params : dict
236 """
237 fitted_params = {}
238 for name in self._get_fitted_param_names():
239 if name in ["aic", "aicc", "bic", "hqic"]:
240 fitted_params[name] = getattr(self._fitted_forecaster, name, None)
241 else:
242 fitted_params[name] = self._fitted_forecaster.params.get(name)
243 return fitted_params
244
245 def _get_fitted_param_names(self):
246 """Get names of fitted parameters."""
247 return self._fitted_param_names
248
249
250 def _coerce_int_to_range_index(y, X=None):
251 new_index = pd.RangeIndex(y.index[0], y.index[-1] + 1)
252 try:
253 np.testing.assert_array_equal(y.index, new_index)
254 except AssertionError:
255 raise ValueError(
256 "Coercion of integer pd.Index to pd.RangeIndex "
257 "failed. Please provide `y_train` with a "
258 "pd.RangeIndex."
259 )
260 y.index = new_index
261 if X is not None:
262 X.index = new_index
263 return y, X
264
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/sktime/forecasting/base/adapters/_statsmodels.py b/sktime/forecasting/base/adapters/_statsmodels.py
--- a/sktime/forecasting/base/adapters/_statsmodels.py
+++ b/sktime/forecasting/base/adapters/_statsmodels.py
@@ -110,13 +110,15 @@
# bug fix for evaluate function as test_plus_train indices are passed
# statsmodels exog must contain test indices only.
# For discussion see https://github.com/sktime/sktime/issues/3830
- if X is not None:
+ if X is not None and self._X is not None:
ind_drop = self._X.index
X = X.loc[~X.index.isin(ind_drop)]
# Entire range of the forecast horizon is required
X = X.iloc[: (fh_int[-1] + 1)] # include end point
if "exog" in inspect.signature(self._forecaster.__init__).parameters.keys():
+ if self._X is None:
+ X = None # change X passed in predict to None if X wasn't passed to fit
y_pred = self._fitted_forecaster.predict(start=start, end=end, exog=X)
else:
y_pred = self._fitted_forecaster.predict(start=start, end=end)
|
{"golden_diff": "diff --git a/sktime/forecasting/base/adapters/_statsmodels.py b/sktime/forecasting/base/adapters/_statsmodels.py\n--- a/sktime/forecasting/base/adapters/_statsmodels.py\n+++ b/sktime/forecasting/base/adapters/_statsmodels.py\n@@ -110,13 +110,15 @@\n # bug fix for evaluate function as test_plus_train indices are passed\n # statsmodels exog must contain test indices only.\n # For discussion see https://github.com/sktime/sktime/issues/3830\n- if X is not None:\n+ if X is not None and self._X is not None:\n ind_drop = self._X.index\n X = X.loc[~X.index.isin(ind_drop)]\n # Entire range of the forecast horizon is required\n X = X.iloc[: (fh_int[-1] + 1)] # include end point\n \n if \"exog\" in inspect.signature(self._forecaster.__init__).parameters.keys():\n+ if self._X is None:\n+ X = None # change X passed in predict to None if X wasn't passed to fit\n y_pred = self._fitted_forecaster.predict(start=start, end=end, exog=X)\n else:\n y_pred = self._fitted_forecaster.predict(start=start, end=end)\n", "issue": "[BUG] `SARIMAX` fails when `X` is passed in `predict` but not used in `fit`\n## Minimal Reproducible Example\r\n\r\n```pycon\r\n>>> \r\n>>> from sktime.datasets import load_longley\r\n>>> from sktime.forecasting.sarimax import SARIMAX\r\n>>> from sktime.split import temporal_train_test_split\r\n>>> \r\n>>> y, X = load_longley()\r\n>>> \r\n>>> y_train, _, _, X_test = temporal_train_test_split(y, X)\r\n>>> \r\n>>> forecaster = SARIMAX()\r\n>>> \r\n>>> forecaster.fit(y_train)\r\nSARIMAX()\r\n>>> \r\n>>> # works\r\n>>> forecaster.predict(fh=[1, 2, 3, 4])\r\n1959 66061.176439\r\n1960 65682.034815\r\n1961 65363.883253\r\n1962 65096.910677\r\nFreq: A-DEC, Name: TOTEMP, dtype: float64\r\n>>> \r\n>>> # fails\r\n>>> forecaster.predict(fh=[1, 2, 3, 4], X=X_test)\r\nTraceback (most recent call last):\r\n File \"<stdin>\", line 1, in <module>\r\n File \"/home/anirban/sktime-fork/sktime/forecasting/base/_base.py\", line 412, in predict\r\n y_pred = self._predict(fh=fh, X=X_inner)\r\n File \"/home/anirban/sktime-fork/sktime/forecasting/base/adapters/_statsmodels.py\", line 108, in _predict\r\n ind_drop = self._X.index\r\nAttributeError: 'NoneType' object has no attribute 'index'\r\n>>> \r\n```\r\n\r\n## Expectation\r\n\r\nI was expecting no failures, and identical behaviour in both cases. 
I do get that behaviour if I try with `sktime.forecasting.arima.ARIMA`.\r\n\r\n## Version\r\n\r\nOperating System: Ubuntu 22.04.3 LTS (WSL)\r\nPython: 3.10.12\r\nSktime: e51ec2472a\r\n\n", "before_files": [{"content": "# !/usr/bin/env python3 -u\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements adapter for statsmodels forecasters to be used in sktime framework.\"\"\"\n\n__author__ = [\"mloning\", \"ciaran-g\"]\n__all__ = [\"_StatsModelsAdapter\"]\n\nimport inspect\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.forecasting.base import BaseForecaster\nfrom sktime.utils.warnings import warn\n\n\nclass _StatsModelsAdapter(BaseForecaster):\n \"\"\"Base class for interfacing statsmodels forecasting algorithms.\"\"\"\n\n _fitted_param_names = ()\n _tags = {\n # packaging info\n # --------------\n \"authors\": [\"mloning\", \"ciaran-g\"],\n \"maintainers\": [\"ciaran-g\"],\n \"python_dependencies\": \"statsmodels\",\n # estimator type\n # --------------\n \"ignores-exogeneous-X\": True,\n \"requires-fh-in-fit\": False,\n \"handles-missing-data\": False,\n }\n\n def __init__(self, random_state=None):\n self._forecaster = None\n self.random_state = random_state\n self._fitted_forecaster = None\n super().__init__()\n\n def _fit(self, y, X, fh):\n \"\"\"Fit to training data.\n\n Parameters\n ----------\n y : pd.Series\n Target time series to which to fit the forecaster.\n fh : int, list or np.array, optional (default=None)\n The forecasters horizon with the steps ahead to to predict.\n X : pd.DataFrame, optional (default=None)\n Exogenous variables are ignored\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n # statsmodels does not support the pd.Int64Index as required,\n # so we coerce them here to pd.RangeIndex\n if isinstance(y, pd.Series) and pd.api.types.is_integer_dtype(y.index):\n y, X = _coerce_int_to_range_index(y, X)\n self._fit_forecaster(y, X)\n return self\n\n def _fit_forecaster(self, y_train, X_train=None):\n \"\"\"Log used internally in fit.\"\"\"\n raise NotImplementedError(\"abstract method\")\n\n def _update(self, y, X=None, update_params=True):\n \"\"\"Update used internally in update.\"\"\"\n if update_params or self.is_composite():\n super()._update(y, X, update_params=update_params)\n else:\n if not hasattr(self._fitted_forecaster, \"append\"):\n warn(\n f\"NotImplementedWarning: {self.__class__.__name__} \"\n f\"can not accept new data when update_params=False. \"\n f\"Call with update_params=True to refit with new data.\",\n obj=self,\n )\n else:\n # only append unseen data to fitted forecaster\n index_diff = y.index.difference(\n self._fitted_forecaster.fittedvalues.index\n )\n if index_diff.isin(y.index).all():\n y = y.loc[index_diff]\n self._fitted_forecaster = self._fitted_forecaster.append(y)\n\n def _predict(self, fh, X):\n \"\"\"Make forecasts.\n\n Parameters\n ----------\n fh : ForecastingHorizon\n The forecasters horizon with the steps ahead to to predict.\n Default is one-step ahead forecast,\n i.e. 
np.array([1])\n X : pd.DataFrame, optional (default=None)\n Exogenous variables are ignored.\n\n Returns\n -------\n y_pred : pd.Series\n Returns series of predicted values.\n \"\"\"\n # statsmodels requires zero-based indexing starting at the\n # beginning of the training series when passing integers\n start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]\n fh_int = fh.to_absolute_int(self._y.index[0], self.cutoff) - len(self._y)\n\n # bug fix for evaluate function as test_plus_train indices are passed\n # statsmodels exog must contain test indices only.\n # For discussion see https://github.com/sktime/sktime/issues/3830\n if X is not None:\n ind_drop = self._X.index\n X = X.loc[~X.index.isin(ind_drop)]\n # Entire range of the forecast horizon is required\n X = X.iloc[: (fh_int[-1] + 1)] # include end point\n\n if \"exog\" in inspect.signature(self._forecaster.__init__).parameters.keys():\n y_pred = self._fitted_forecaster.predict(start=start, end=end, exog=X)\n else:\n y_pred = self._fitted_forecaster.predict(start=start, end=end)\n\n # statsmodels forecasts all periods from start to end of forecasting\n # horizon, but only return given time points in forecasting horizon\n # if fh[0] > 1 steps ahead of cutoff then make relative to `start`\n fh_int = fh_int - fh_int[0]\n y_pred = y_pred.iloc[fh_int]\n # ensure that name is not added nor removed\n # otherwise this may upset conversion to pd.DataFrame\n y_pred.name = self._y.name\n return y_pred\n\n @staticmethod\n def _extract_conf_int(prediction_results, alpha) -> pd.DataFrame:\n \"\"\"Construct confidence interval at specified `alpha` for each timestep.\n\n Parameters\n ----------\n prediction_results : PredictionResults\n results class, as returned by ``self._fitted_forecaster.get_prediction``\n alpha : float\n one minus nominal coverage\n\n Returns\n -------\n pd.DataFrame\n confidence intervals at each timestep\n\n The dataframe must have at least two columns ``lower`` and ``upper``, and\n the row indices must be integers relative to ``self.cutoff``. 
Order of\n columns do not matter, and row indices must be a superset of relative\n integer horizon of ``fh``.\n \"\"\"\n del prediction_results, alpha # tools like ``vulture`` may complain as unused\n\n raise NotImplementedError(\"abstract method\")\n\n def _predict_interval(self, fh, X, coverage):\n \"\"\"Compute/return prediction interval forecasts.\n\n private _predict_interval containing the core logic,\n called from predict_interval and default _predict_quantiles\n\n Parameters\n ----------\n fh : guaranteed to be ForecastingHorizon\n The forecasting horizon with the steps ahead to to predict.\n X : optional (default=None)\n guaranteed to be of a type in self.get_tag(\"X_inner_mtype\")\n Exogeneous time series to predict from.\n coverage : float or list of float, optional (default=0.95)\n nominal coverage(s) of predictive interval(s)\n\n Returns\n -------\n pred_int : pd.DataFrame\n Column has multi-index: first level is variable name from y in fit,\n second level coverage fractions for which intervals were computed.\n in the same order as in input `coverage`.\n Third level is string \"lower\" or \"upper\", for lower/upper interval end.\n Row index is fh, with additional (upper) levels equal to instance levels,\n from y seen in fit, if y_inner_mtype is Panel or Hierarchical.\n Entries are forecasts of lower/upper interval end,\n for var in col index, at nominal coverage in second col index,\n lower/upper depending on third col index, for the row index.\n Upper/lower interval end forecasts are equivalent to\n quantile forecasts at alpha = 0.5 - c/2, 0.5 + c/2 for c in coverage.\n \"\"\"\n implements_interval_adapter = self._has_implementation_of(\"_extract_conf_int\")\n implements_quantiles = self._has_implementation_of(\"_predict_quantiles\")\n\n if not implements_interval_adapter and implements_quantiles:\n return BaseForecaster._predict_interval(self, fh, X=X, coverage=coverage)\n\n start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]\n fh_int = fh.to_absolute_int(self._y.index[0], self.cutoff) - len(self._y)\n # if fh > 1 steps ahead of cutoff\n fh_int = fh_int - fh_int[0]\n\n get_prediction_arguments = {\"start\": start, \"end\": end}\n\n if hasattr(self, \"random_state\"):\n get_prediction_arguments[\"random_state\"] = self.random_state\n\n if inspect.signature(self._fitted_forecaster.get_prediction).parameters.get(\n \"exog\"\n ):\n get_prediction_arguments[\"exog\"] = X\n\n prediction_results = self._fitted_forecaster.get_prediction(\n **get_prediction_arguments\n )\n\n var_names = self._get_varnames()\n var_name = var_names[0]\n columns = pd.MultiIndex.from_product([var_names, coverage, [\"lower\", \"upper\"]])\n preds_index = self._extract_conf_int(prediction_results, (1 - coverage[0]))\n preds_index = preds_index.iloc[fh_int].index\n pred_int = pd.DataFrame(index=preds_index, columns=columns)\n\n for c in coverage:\n pred_statsmodels = self._extract_conf_int(prediction_results, (1 - c))\n\n pred_int[(var_name, c, \"lower\")] = pred_statsmodels.iloc[fh_int][\"lower\"]\n pred_int[(var_name, c, \"upper\")] = pred_statsmodels.iloc[fh_int][\"upper\"]\n\n return pred_int\n\n def _get_fitted_params(self):\n \"\"\"Get fitted parameters.\n\n Returns\n -------\n fitted_params : dict\n \"\"\"\n fitted_params = {}\n for name in self._get_fitted_param_names():\n if name in [\"aic\", \"aicc\", \"bic\", \"hqic\"]:\n fitted_params[name] = getattr(self._fitted_forecaster, name, None)\n else:\n fitted_params[name] = self._fitted_forecaster.params.get(name)\n return 
fitted_params\n\n def _get_fitted_param_names(self):\n \"\"\"Get names of fitted parameters.\"\"\"\n return self._fitted_param_names\n\n\ndef _coerce_int_to_range_index(y, X=None):\n new_index = pd.RangeIndex(y.index[0], y.index[-1] + 1)\n try:\n np.testing.assert_array_equal(y.index, new_index)\n except AssertionError:\n raise ValueError(\n \"Coercion of integer pd.Index to pd.RangeIndex \"\n \"failed. Please provide `y_train` with a \"\n \"pd.RangeIndex.\"\n )\n y.index = new_index\n if X is not None:\n X.index = new_index\n return y, X\n", "path": "sktime/forecasting/base/adapters/_statsmodels.py"}], "after_files": [{"content": "# !/usr/bin/env python3 -u\n# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)\n\"\"\"Implements adapter for statsmodels forecasters to be used in sktime framework.\"\"\"\n\n__author__ = [\"mloning\", \"ciaran-g\"]\n__all__ = [\"_StatsModelsAdapter\"]\n\nimport inspect\n\nimport numpy as np\nimport pandas as pd\n\nfrom sktime.forecasting.base import BaseForecaster\nfrom sktime.utils.warnings import warn\n\n\nclass _StatsModelsAdapter(BaseForecaster):\n \"\"\"Base class for interfacing statsmodels forecasting algorithms.\"\"\"\n\n _fitted_param_names = ()\n _tags = {\n # packaging info\n # --------------\n \"authors\": [\"mloning\", \"ciaran-g\"],\n \"maintainers\": [\"ciaran-g\"],\n \"python_dependencies\": \"statsmodels\",\n # estimator type\n # --------------\n \"ignores-exogeneous-X\": True,\n \"requires-fh-in-fit\": False,\n \"handles-missing-data\": False,\n }\n\n def __init__(self, random_state=None):\n self._forecaster = None\n self.random_state = random_state\n self._fitted_forecaster = None\n super().__init__()\n\n def _fit(self, y, X, fh):\n \"\"\"Fit to training data.\n\n Parameters\n ----------\n y : pd.Series\n Target time series to which to fit the forecaster.\n fh : int, list or np.array, optional (default=None)\n The forecasters horizon with the steps ahead to to predict.\n X : pd.DataFrame, optional (default=None)\n Exogenous variables are ignored\n\n Returns\n -------\n self : returns an instance of self.\n \"\"\"\n # statsmodels does not support the pd.Int64Index as required,\n # so we coerce them here to pd.RangeIndex\n if isinstance(y, pd.Series) and pd.api.types.is_integer_dtype(y.index):\n y, X = _coerce_int_to_range_index(y, X)\n self._fit_forecaster(y, X)\n return self\n\n def _fit_forecaster(self, y_train, X_train=None):\n \"\"\"Log used internally in fit.\"\"\"\n raise NotImplementedError(\"abstract method\")\n\n def _update(self, y, X=None, update_params=True):\n \"\"\"Update used internally in update.\"\"\"\n if update_params or self.is_composite():\n super()._update(y, X, update_params=update_params)\n else:\n if not hasattr(self._fitted_forecaster, \"append\"):\n warn(\n f\"NotImplementedWarning: {self.__class__.__name__} \"\n f\"can not accept new data when update_params=False. \"\n f\"Call with update_params=True to refit with new data.\",\n obj=self,\n )\n else:\n # only append unseen data to fitted forecaster\n index_diff = y.index.difference(\n self._fitted_forecaster.fittedvalues.index\n )\n if index_diff.isin(y.index).all():\n y = y.loc[index_diff]\n self._fitted_forecaster = self._fitted_forecaster.append(y)\n\n def _predict(self, fh, X):\n \"\"\"Make forecasts.\n\n Parameters\n ----------\n fh : ForecastingHorizon\n The forecasters horizon with the steps ahead to to predict.\n Default is one-step ahead forecast,\n i.e. 
np.array([1])\n X : pd.DataFrame, optional (default=None)\n Exogenous variables are ignored.\n\n Returns\n -------\n y_pred : pd.Series\n Returns series of predicted values.\n \"\"\"\n # statsmodels requires zero-based indexing starting at the\n # beginning of the training series when passing integers\n start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]\n fh_int = fh.to_absolute_int(self._y.index[0], self.cutoff) - len(self._y)\n\n # bug fix for evaluate function as test_plus_train indices are passed\n # statsmodels exog must contain test indices only.\n # For discussion see https://github.com/sktime/sktime/issues/3830\n if X is not None and self._X is not None:\n ind_drop = self._X.index\n X = X.loc[~X.index.isin(ind_drop)]\n # Entire range of the forecast horizon is required\n X = X.iloc[: (fh_int[-1] + 1)] # include end point\n\n if \"exog\" in inspect.signature(self._forecaster.__init__).parameters.keys():\n if self._X is None:\n X = None # change X passed in predict to None if X wasn't passed to fit\n y_pred = self._fitted_forecaster.predict(start=start, end=end, exog=X)\n else:\n y_pred = self._fitted_forecaster.predict(start=start, end=end)\n\n # statsmodels forecasts all periods from start to end of forecasting\n # horizon, but only return given time points in forecasting horizon\n # if fh[0] > 1 steps ahead of cutoff then make relative to `start`\n fh_int = fh_int - fh_int[0]\n y_pred = y_pred.iloc[fh_int]\n # ensure that name is not added nor removed\n # otherwise this may upset conversion to pd.DataFrame\n y_pred.name = self._y.name\n return y_pred\n\n @staticmethod\n def _extract_conf_int(prediction_results, alpha) -> pd.DataFrame:\n \"\"\"Construct confidence interval at specified `alpha` for each timestep.\n\n Parameters\n ----------\n prediction_results : PredictionResults\n results class, as returned by ``self._fitted_forecaster.get_prediction``\n alpha : float\n one minus nominal coverage\n\n Returns\n -------\n pd.DataFrame\n confidence intervals at each timestep\n\n The dataframe must have at least two columns ``lower`` and ``upper``, and\n the row indices must be integers relative to ``self.cutoff``. 
Order of\n columns do not matter, and row indices must be a superset of relative\n integer horizon of ``fh``.\n \"\"\"\n del prediction_results, alpha # tools like ``vulture`` may complain as unused\n\n raise NotImplementedError(\"abstract method\")\n\n def _predict_interval(self, fh, X, coverage):\n \"\"\"Compute/return prediction interval forecasts.\n\n private _predict_interval containing the core logic,\n called from predict_interval and default _predict_quantiles\n\n Parameters\n ----------\n fh : guaranteed to be ForecastingHorizon\n The forecasting horizon with the steps ahead to to predict.\n X : optional (default=None)\n guaranteed to be of a type in self.get_tag(\"X_inner_mtype\")\n Exogeneous time series to predict from.\n coverage : float or list of float, optional (default=0.95)\n nominal coverage(s) of predictive interval(s)\n\n Returns\n -------\n pred_int : pd.DataFrame\n Column has multi-index: first level is variable name from y in fit,\n second level coverage fractions for which intervals were computed.\n in the same order as in input `coverage`.\n Third level is string \"lower\" or \"upper\", for lower/upper interval end.\n Row index is fh, with additional (upper) levels equal to instance levels,\n from y seen in fit, if y_inner_mtype is Panel or Hierarchical.\n Entries are forecasts of lower/upper interval end,\n for var in col index, at nominal coverage in second col index,\n lower/upper depending on third col index, for the row index.\n Upper/lower interval end forecasts are equivalent to\n quantile forecasts at alpha = 0.5 - c/2, 0.5 + c/2 for c in coverage.\n \"\"\"\n implements_interval_adapter = self._has_implementation_of(\"_extract_conf_int\")\n implements_quantiles = self._has_implementation_of(\"_predict_quantiles\")\n\n if not implements_interval_adapter and implements_quantiles:\n return BaseForecaster._predict_interval(self, fh, X=X, coverage=coverage)\n\n start, end = fh.to_absolute_int(self._y.index[0], self.cutoff)[[0, -1]]\n fh_int = fh.to_absolute_int(self._y.index[0], self.cutoff) - len(self._y)\n # if fh > 1 steps ahead of cutoff\n fh_int = fh_int - fh_int[0]\n\n get_prediction_arguments = {\"start\": start, \"end\": end}\n\n if hasattr(self, \"random_state\"):\n get_prediction_arguments[\"random_state\"] = self.random_state\n\n if inspect.signature(self._fitted_forecaster.get_prediction).parameters.get(\n \"exog\"\n ):\n get_prediction_arguments[\"exog\"] = X\n\n prediction_results = self._fitted_forecaster.get_prediction(\n **get_prediction_arguments\n )\n\n var_names = self._get_varnames()\n var_name = var_names[0]\n columns = pd.MultiIndex.from_product([var_names, coverage, [\"lower\", \"upper\"]])\n preds_index = self._extract_conf_int(prediction_results, (1 - coverage[0]))\n preds_index = preds_index.iloc[fh_int].index\n pred_int = pd.DataFrame(index=preds_index, columns=columns)\n\n for c in coverage:\n pred_statsmodels = self._extract_conf_int(prediction_results, (1 - c))\n\n pred_int[(var_name, c, \"lower\")] = pred_statsmodels.iloc[fh_int][\"lower\"]\n pred_int[(var_name, c, \"upper\")] = pred_statsmodels.iloc[fh_int][\"upper\"]\n\n return pred_int\n\n def _get_fitted_params(self):\n \"\"\"Get fitted parameters.\n\n Returns\n -------\n fitted_params : dict\n \"\"\"\n fitted_params = {}\n for name in self._get_fitted_param_names():\n if name in [\"aic\", \"aicc\", \"bic\", \"hqic\"]:\n fitted_params[name] = getattr(self._fitted_forecaster, name, None)\n else:\n fitted_params[name] = self._fitted_forecaster.params.get(name)\n return 
fitted_params\n\n def _get_fitted_param_names(self):\n \"\"\"Get names of fitted parameters.\"\"\"\n return self._fitted_param_names\n\n\ndef _coerce_int_to_range_index(y, X=None):\n new_index = pd.RangeIndex(y.index[0], y.index[-1] + 1)\n try:\n np.testing.assert_array_equal(y.index, new_index)\n except AssertionError:\n raise ValueError(\n \"Coercion of integer pd.Index to pd.RangeIndex \"\n \"failed. Please provide `y_train` with a \"\n \"pd.RangeIndex.\"\n )\n y.index = new_index\n if X is not None:\n X.index = new_index\n return y, X\n", "path": "sktime/forecasting/base/adapters/_statsmodels.py"}]}
| 3,731 | 299 |
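The one-line guard in the golden diff above can be illustrated without statsmodels. The sketch below uses a hypothetical `ToyAdapter` class (its naive last-value forecast and method names are assumptions for illustration, not sktime code); it only shows why checking `self._X is not None` before using `self._X.index`, and dropping a predict-time `X` that was never seen in `fit`, avoids the reported `AttributeError`.

```python
import pandas as pd

class ToyAdapter:
    """Hypothetical stand-in for the adapter's fit/predict bookkeeping."""

    def fit(self, y, X=None):
        self._y = y
        self._X = X          # may legitimately stay None
        return self

    def predict(self, fh, X=None):
        # Mirrors the patch: only trim X against self._X when both exist.
        if X is not None and self._X is not None:
            X = X.loc[~X.index.isin(self._X.index)]
        if self._X is None:
            X = None         # exog was unused in fit, so ignore it in predict too
        # Naive forecast: repeat the last training value for each requested step.
        return pd.Series([self._y.iloc[-1]] * len(fh), index=fh)

y_train = pd.Series([1.0, 2.0, 3.0], index=pd.RangeIndex(3))
X_test = pd.DataFrame({"exog": [0.1, 0.2]}, index=pd.RangeIndex(3, 5))

forecaster = ToyAdapter().fit(y_train)                       # fitted without X
print(forecaster.predict(fh=pd.RangeIndex(3, 5), X=X_test))  # no AttributeError
```

Without the `self._X is not None` check, the unfitted-exog path dereferences `None.index`, which is exactly the traceback quoted in this record's issue text.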
gh_patches_debug_5387
|
rasdani/github-patches
|
git_diff
|
pre-commit__pre-commit-1262
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Slightly broken links in output
`pre-commit autoupdate` outputs repository links:
```
Updating https://github.com/psf/black...already up to date.
Updating https://github.com/prettier/prettier...already up to date.
```
In iTerm2 on a Mac using Fish Shell—and probably lots of other setups as well—you can click the repository links (by holding down the _Command_ key):
<img width="668" alt="Screenshot 2020-01-01 at 15 21 32" src="https://user-images.githubusercontent.com/8469540/71642362-6fcd2800-2caa-11ea-9e00-d463dcdf9682.png">
But the link is slightly broken because there is no space after it—we're getting https://github.com/asottile/seed-isort-config...already instead of https://github.com/asottile/seed-isort-config.
This is a tiny issue, but it would be nice if we could fix it. I'll try to make a pull request to show what I mean.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pre_commit/commands/autoupdate.py`
Content:
```
1 from __future__ import print_function
2 from __future__ import unicode_literals
3
4 import collections
5 import os.path
6 import re
7
8 import six
9 from aspy.yaml import ordered_dump
10 from aspy.yaml import ordered_load
11
12 import pre_commit.constants as C
13 from pre_commit import git
14 from pre_commit import output
15 from pre_commit.clientlib import InvalidManifestError
16 from pre_commit.clientlib import load_config
17 from pre_commit.clientlib import load_manifest
18 from pre_commit.clientlib import LOCAL
19 from pre_commit.clientlib import META
20 from pre_commit.commands.migrate_config import migrate_config
21 from pre_commit.util import CalledProcessError
22 from pre_commit.util import cmd_output
23 from pre_commit.util import cmd_output_b
24 from pre_commit.util import tmpdir
25
26
27 class RevInfo(collections.namedtuple('RevInfo', ('repo', 'rev', 'frozen'))):
28 __slots__ = ()
29
30 @classmethod
31 def from_config(cls, config):
32 return cls(config['repo'], config['rev'], None)
33
34 def update(self, tags_only, freeze):
35 if tags_only:
36 tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--abbrev=0')
37 else:
38 tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--exact')
39
40 with tmpdir() as tmp:
41 git.init_repo(tmp, self.repo)
42 cmd_output_b('git', 'fetch', 'origin', 'HEAD', '--tags', cwd=tmp)
43
44 try:
45 rev = cmd_output(*tag_cmd, cwd=tmp)[1].strip()
46 except CalledProcessError:
47 cmd = ('git', 'rev-parse', 'FETCH_HEAD')
48 rev = cmd_output(*cmd, cwd=tmp)[1].strip()
49
50 frozen = None
51 if freeze:
52 exact = cmd_output('git', 'rev-parse', rev, cwd=tmp)[1].strip()
53 if exact != rev:
54 rev, frozen = exact, rev
55 return self._replace(rev=rev, frozen=frozen)
56
57
58 class RepositoryCannotBeUpdatedError(RuntimeError):
59 pass
60
61
62 def _check_hooks_still_exist_at_rev(repo_config, info, store):
63 try:
64 path = store.clone(repo_config['repo'], info.rev)
65 manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))
66 except InvalidManifestError as e:
67 raise RepositoryCannotBeUpdatedError(six.text_type(e))
68
69 # See if any of our hooks were deleted with the new commits
70 hooks = {hook['id'] for hook in repo_config['hooks']}
71 hooks_missing = hooks - {hook['id'] for hook in manifest}
72 if hooks_missing:
73 raise RepositoryCannotBeUpdatedError(
74 'Cannot update because the tip of master is missing these hooks:\n'
75 '{}'.format(', '.join(sorted(hooks_missing))),
76 )
77
78
79 REV_LINE_RE = re.compile(r'^(\s+)rev:(\s*)([^\s#]+)(.*)(\r?\n)$', re.DOTALL)
80 REV_LINE_FMT = '{}rev:{}{}{}{}'
81
82
83 def _original_lines(path, rev_infos, retry=False):
84 """detect `rev:` lines or reformat the file"""
85 with open(path) as f:
86 original = f.read()
87
88 lines = original.splitlines(True)
89 idxs = [i for i, line in enumerate(lines) if REV_LINE_RE.match(line)]
90 if len(idxs) == len(rev_infos):
91 return lines, idxs
92 elif retry:
93 raise AssertionError('could not find rev lines')
94 else:
95 with open(path, 'w') as f:
96 f.write(ordered_dump(ordered_load(original), **C.YAML_DUMP_KWARGS))
97 return _original_lines(path, rev_infos, retry=True)
98
99
100 def _write_new_config(path, rev_infos):
101 lines, idxs = _original_lines(path, rev_infos)
102
103 for idx, rev_info in zip(idxs, rev_infos):
104 if rev_info is None:
105 continue
106 match = REV_LINE_RE.match(lines[idx])
107 assert match is not None
108 new_rev_s = ordered_dump({'rev': rev_info.rev}, **C.YAML_DUMP_KWARGS)
109 new_rev = new_rev_s.split(':', 1)[1].strip()
110 if rev_info.frozen is not None:
111 comment = ' # {}'.format(rev_info.frozen)
112 else:
113 comment = match.group(4)
114 lines[idx] = REV_LINE_FMT.format(
115 match.group(1), match.group(2), new_rev, comment, match.group(5),
116 )
117
118 with open(path, 'w') as f:
119 f.write(''.join(lines))
120
121
122 def autoupdate(config_file, store, tags_only, freeze, repos=()):
123 """Auto-update the pre-commit config to the latest versions of repos."""
124 migrate_config(config_file, quiet=True)
125 retv = 0
126 rev_infos = []
127 changed = False
128
129 config = load_config(config_file)
130 for repo_config in config['repos']:
131 if repo_config['repo'] in {LOCAL, META}:
132 continue
133
134 info = RevInfo.from_config(repo_config)
135 if repos and info.repo not in repos:
136 rev_infos.append(None)
137 continue
138
139 output.write('Updating {}...'.format(info.repo))
140 new_info = info.update(tags_only=tags_only, freeze=freeze)
141 try:
142 _check_hooks_still_exist_at_rev(repo_config, new_info, store)
143 except RepositoryCannotBeUpdatedError as error:
144 output.write_line(error.args[0])
145 rev_infos.append(None)
146 retv = 1
147 continue
148
149 if new_info.rev != info.rev:
150 changed = True
151 if new_info.frozen:
152 updated_to = '{} (frozen)'.format(new_info.frozen)
153 else:
154 updated_to = new_info.rev
155 msg = 'updating {} -> {}.'.format(info.rev, updated_to)
156 output.write_line(msg)
157 rev_infos.append(new_info)
158 else:
159 output.write_line('already up to date.')
160 rev_infos.append(None)
161
162 if changed:
163 _write_new_config(config_file, rev_infos)
164
165 return retv
166
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pre_commit/commands/autoupdate.py b/pre_commit/commands/autoupdate.py
--- a/pre_commit/commands/autoupdate.py
+++ b/pre_commit/commands/autoupdate.py
@@ -136,7 +136,7 @@
rev_infos.append(None)
continue
- output.write('Updating {}...'.format(info.repo))
+ output.write('Updating {} ... '.format(info.repo))
new_info = info.update(tags_only=tags_only, freeze=freeze)
try:
_check_hooks_still_exist_at_rev(repo_config, new_info, store)
|
{"golden_diff": "diff --git a/pre_commit/commands/autoupdate.py b/pre_commit/commands/autoupdate.py\n--- a/pre_commit/commands/autoupdate.py\n+++ b/pre_commit/commands/autoupdate.py\n@@ -136,7 +136,7 @@\n rev_infos.append(None)\n continue\n \n- output.write('Updating {}...'.format(info.repo))\n+ output.write('Updating {} ... '.format(info.repo))\n new_info = info.update(tags_only=tags_only, freeze=freeze)\n try:\n _check_hooks_still_exist_at_rev(repo_config, new_info, store)\n", "issue": "Slightly broken links in output\n`pre-commit autoupdate` outputs repository links:\r\n\r\n```\r\nUpdating https://github.com/psf/black...already up to date.\r\nUpdating https://github.com/prettier/prettier...already up to date.\r\n```\r\n\r\nIn iTerm2 on a Mac using Fish Shell\u2014and probably lots of other setups as well\u2014you can click the repository links (by holding down the _Command_ key):\r\n\r\n<img width=\"668\" alt=\"Screenshot 2020-01-01 at 15 21 32\" src=\"https://user-images.githubusercontent.com/8469540/71642362-6fcd2800-2caa-11ea-9e00-d463dcdf9682.png\">\r\n\r\nBut the link is slightly broken because there is no space after it\u2014we're getting https://github.com/asottile/seed-isort-config...already instead of https://github.com/asottile/seed-isort-config.\r\n\r\nThis is a tiny issue, but it would be nice if we could fix it. I'll try to make a pull request to show what I mean.\n", "before_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport collections\nimport os.path\nimport re\n\nimport six\nfrom aspy.yaml import ordered_dump\nfrom aspy.yaml import ordered_load\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import InvalidManifestError\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.clientlib import load_manifest\nfrom pre_commit.clientlib import LOCAL\nfrom pre_commit.clientlib import META\nfrom pre_commit.commands.migrate_config import migrate_config\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import tmpdir\n\n\nclass RevInfo(collections.namedtuple('RevInfo', ('repo', 'rev', 'frozen'))):\n __slots__ = ()\n\n @classmethod\n def from_config(cls, config):\n return cls(config['repo'], config['rev'], None)\n\n def update(self, tags_only, freeze):\n if tags_only:\n tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--abbrev=0')\n else:\n tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--exact')\n\n with tmpdir() as tmp:\n git.init_repo(tmp, self.repo)\n cmd_output_b('git', 'fetch', 'origin', 'HEAD', '--tags', cwd=tmp)\n\n try:\n rev = cmd_output(*tag_cmd, cwd=tmp)[1].strip()\n except CalledProcessError:\n cmd = ('git', 'rev-parse', 'FETCH_HEAD')\n rev = cmd_output(*cmd, cwd=tmp)[1].strip()\n\n frozen = None\n if freeze:\n exact = cmd_output('git', 'rev-parse', rev, cwd=tmp)[1].strip()\n if exact != rev:\n rev, frozen = exact, rev\n return self._replace(rev=rev, frozen=frozen)\n\n\nclass RepositoryCannotBeUpdatedError(RuntimeError):\n pass\n\n\ndef _check_hooks_still_exist_at_rev(repo_config, info, store):\n try:\n path = store.clone(repo_config['repo'], info.rev)\n manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))\n except InvalidManifestError as e:\n raise RepositoryCannotBeUpdatedError(six.text_type(e))\n\n # See if any of our hooks were deleted with the new commits\n hooks = {hook['id'] for hook in 
repo_config['hooks']}\n hooks_missing = hooks - {hook['id'] for hook in manifest}\n if hooks_missing:\n raise RepositoryCannotBeUpdatedError(\n 'Cannot update because the tip of master is missing these hooks:\\n'\n '{}'.format(', '.join(sorted(hooks_missing))),\n )\n\n\nREV_LINE_RE = re.compile(r'^(\\s+)rev:(\\s*)([^\\s#]+)(.*)(\\r?\\n)$', re.DOTALL)\nREV_LINE_FMT = '{}rev:{}{}{}{}'\n\n\ndef _original_lines(path, rev_infos, retry=False):\n \"\"\"detect `rev:` lines or reformat the file\"\"\"\n with open(path) as f:\n original = f.read()\n\n lines = original.splitlines(True)\n idxs = [i for i, line in enumerate(lines) if REV_LINE_RE.match(line)]\n if len(idxs) == len(rev_infos):\n return lines, idxs\n elif retry:\n raise AssertionError('could not find rev lines')\n else:\n with open(path, 'w') as f:\n f.write(ordered_dump(ordered_load(original), **C.YAML_DUMP_KWARGS))\n return _original_lines(path, rev_infos, retry=True)\n\n\ndef _write_new_config(path, rev_infos):\n lines, idxs = _original_lines(path, rev_infos)\n\n for idx, rev_info in zip(idxs, rev_infos):\n if rev_info is None:\n continue\n match = REV_LINE_RE.match(lines[idx])\n assert match is not None\n new_rev_s = ordered_dump({'rev': rev_info.rev}, **C.YAML_DUMP_KWARGS)\n new_rev = new_rev_s.split(':', 1)[1].strip()\n if rev_info.frozen is not None:\n comment = ' # {}'.format(rev_info.frozen)\n else:\n comment = match.group(4)\n lines[idx] = REV_LINE_FMT.format(\n match.group(1), match.group(2), new_rev, comment, match.group(5),\n )\n\n with open(path, 'w') as f:\n f.write(''.join(lines))\n\n\ndef autoupdate(config_file, store, tags_only, freeze, repos=()):\n \"\"\"Auto-update the pre-commit config to the latest versions of repos.\"\"\"\n migrate_config(config_file, quiet=True)\n retv = 0\n rev_infos = []\n changed = False\n\n config = load_config(config_file)\n for repo_config in config['repos']:\n if repo_config['repo'] in {LOCAL, META}:\n continue\n\n info = RevInfo.from_config(repo_config)\n if repos and info.repo not in repos:\n rev_infos.append(None)\n continue\n\n output.write('Updating {}...'.format(info.repo))\n new_info = info.update(tags_only=tags_only, freeze=freeze)\n try:\n _check_hooks_still_exist_at_rev(repo_config, new_info, store)\n except RepositoryCannotBeUpdatedError as error:\n output.write_line(error.args[0])\n rev_infos.append(None)\n retv = 1\n continue\n\n if new_info.rev != info.rev:\n changed = True\n if new_info.frozen:\n updated_to = '{} (frozen)'.format(new_info.frozen)\n else:\n updated_to = new_info.rev\n msg = 'updating {} -> {}.'.format(info.rev, updated_to)\n output.write_line(msg)\n rev_infos.append(new_info)\n else:\n output.write_line('already up to date.')\n rev_infos.append(None)\n\n if changed:\n _write_new_config(config_file, rev_infos)\n\n return retv\n", "path": "pre_commit/commands/autoupdate.py"}], "after_files": [{"content": "from __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport collections\nimport os.path\nimport re\n\nimport six\nfrom aspy.yaml import ordered_dump\nfrom aspy.yaml import ordered_load\n\nimport pre_commit.constants as C\nfrom pre_commit import git\nfrom pre_commit import output\nfrom pre_commit.clientlib import InvalidManifestError\nfrom pre_commit.clientlib import load_config\nfrom pre_commit.clientlib import load_manifest\nfrom pre_commit.clientlib import LOCAL\nfrom pre_commit.clientlib import META\nfrom pre_commit.commands.migrate_config import migrate_config\nfrom pre_commit.util import CalledProcessError\nfrom pre_commit.util 
import cmd_output\nfrom pre_commit.util import cmd_output_b\nfrom pre_commit.util import tmpdir\n\n\nclass RevInfo(collections.namedtuple('RevInfo', ('repo', 'rev', 'frozen'))):\n __slots__ = ()\n\n @classmethod\n def from_config(cls, config):\n return cls(config['repo'], config['rev'], None)\n\n def update(self, tags_only, freeze):\n if tags_only:\n tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--abbrev=0')\n else:\n tag_cmd = ('git', 'describe', 'FETCH_HEAD', '--tags', '--exact')\n\n with tmpdir() as tmp:\n git.init_repo(tmp, self.repo)\n cmd_output_b('git', 'fetch', 'origin', 'HEAD', '--tags', cwd=tmp)\n\n try:\n rev = cmd_output(*tag_cmd, cwd=tmp)[1].strip()\n except CalledProcessError:\n cmd = ('git', 'rev-parse', 'FETCH_HEAD')\n rev = cmd_output(*cmd, cwd=tmp)[1].strip()\n\n frozen = None\n if freeze:\n exact = cmd_output('git', 'rev-parse', rev, cwd=tmp)[1].strip()\n if exact != rev:\n rev, frozen = exact, rev\n return self._replace(rev=rev, frozen=frozen)\n\n\nclass RepositoryCannotBeUpdatedError(RuntimeError):\n pass\n\n\ndef _check_hooks_still_exist_at_rev(repo_config, info, store):\n try:\n path = store.clone(repo_config['repo'], info.rev)\n manifest = load_manifest(os.path.join(path, C.MANIFEST_FILE))\n except InvalidManifestError as e:\n raise RepositoryCannotBeUpdatedError(six.text_type(e))\n\n # See if any of our hooks were deleted with the new commits\n hooks = {hook['id'] for hook in repo_config['hooks']}\n hooks_missing = hooks - {hook['id'] for hook in manifest}\n if hooks_missing:\n raise RepositoryCannotBeUpdatedError(\n 'Cannot update because the tip of master is missing these hooks:\\n'\n '{}'.format(', '.join(sorted(hooks_missing))),\n )\n\n\nREV_LINE_RE = re.compile(r'^(\\s+)rev:(\\s*)([^\\s#]+)(.*)(\\r?\\n)$', re.DOTALL)\nREV_LINE_FMT = '{}rev:{}{}{}{}'\n\n\ndef _original_lines(path, rev_infos, retry=False):\n \"\"\"detect `rev:` lines or reformat the file\"\"\"\n with open(path) as f:\n original = f.read()\n\n lines = original.splitlines(True)\n idxs = [i for i, line in enumerate(lines) if REV_LINE_RE.match(line)]\n if len(idxs) == len(rev_infos):\n return lines, idxs\n elif retry:\n raise AssertionError('could not find rev lines')\n else:\n with open(path, 'w') as f:\n f.write(ordered_dump(ordered_load(original), **C.YAML_DUMP_KWARGS))\n return _original_lines(path, rev_infos, retry=True)\n\n\ndef _write_new_config(path, rev_infos):\n lines, idxs = _original_lines(path, rev_infos)\n\n for idx, rev_info in zip(idxs, rev_infos):\n if rev_info is None:\n continue\n match = REV_LINE_RE.match(lines[idx])\n assert match is not None\n new_rev_s = ordered_dump({'rev': rev_info.rev}, **C.YAML_DUMP_KWARGS)\n new_rev = new_rev_s.split(':', 1)[1].strip()\n if rev_info.frozen is not None:\n comment = ' # {}'.format(rev_info.frozen)\n else:\n comment = match.group(4)\n lines[idx] = REV_LINE_FMT.format(\n match.group(1), match.group(2), new_rev, comment, match.group(5),\n )\n\n with open(path, 'w') as f:\n f.write(''.join(lines))\n\n\ndef autoupdate(config_file, store, tags_only, freeze, repos=()):\n \"\"\"Auto-update the pre-commit config to the latest versions of repos.\"\"\"\n migrate_config(config_file, quiet=True)\n retv = 0\n rev_infos = []\n changed = False\n\n config = load_config(config_file)\n for repo_config in config['repos']:\n if repo_config['repo'] in {LOCAL, META}:\n continue\n\n info = RevInfo.from_config(repo_config)\n if repos and info.repo not in repos:\n rev_infos.append(None)\n continue\n\n output.write('Updating {} ... 
'.format(info.repo))\n new_info = info.update(tags_only=tags_only, freeze=freeze)\n try:\n _check_hooks_still_exist_at_rev(repo_config, new_info, store)\n except RepositoryCannotBeUpdatedError as error:\n output.write_line(error.args[0])\n rev_infos.append(None)\n retv = 1\n continue\n\n if new_info.rev != info.rev:\n changed = True\n if new_info.frozen:\n updated_to = '{} (frozen)'.format(new_info.frozen)\n else:\n updated_to = new_info.rev\n msg = 'updating {} -> {}.'.format(info.rev, updated_to)\n output.write_line(msg)\n rev_infos.append(new_info)\n else:\n output.write_line('already up to date.')\n rev_infos.append(None)\n\n if changed:\n _write_new_config(config_file, rev_infos)\n\n return retv\n", "path": "pre_commit/commands/autoupdate.py"}]}
| 2,232 | 133 |
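The patch in this record is purely about string formatting, but its effect on terminal hyperlink detection is easy to reproduce. The `write` helper below is an assumed stand-in for `pre_commit.output.write` (which prints without appending a newline); only the two format strings are taken from the diff.

```python
import sys

def write(s):
    # assumed stand-in for pre_commit.output.write: no newline appended
    sys.stdout.write(s)

repo = "https://github.com/asottile/seed-isort-config"

# Before the patch: the dots (and the text printed next on the same line)
# are glued to the URL, so terminals extend the clickable link past it.
write('Updating {}...'.format(repo))
write('already up to date.\n')

# After the patch: whitespace around "..." leaves the URL cleanly delimited.
write('Updating {} ... '.format(repo))
write('already up to date.\n')
```

Terminals that auto-detect links generally stop at whitespace, so the added spaces are enough to keep `...already` out of the detected URL.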
gh_patches_debug_7097
|
rasdani/github-patches
|
git_diff
|
learningequality__kolibri-3406
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Showing 120% score in exam report
### Observed behavior
After submitting exam, when coach user watching progress of each user in exam report. Coach user see 120% score in exam report. We have attached screenshot and database file,so you can easily re-generate this issue.
### Expected behavior
Score must be between 0-100%.
### Steps to reproduce
1. Copy attached database file in .kolibri folder.
2. login with username "pm" and password "sc".
3. Click on Coach.
4. Click on Class 4A.
5. Click on Exams.
6. See report of the Unit 2B-Final exam.
7. See learner Junaid Shaikh.
### Context
* Kolibri version : Kolibri 0.4.9
* Operating system : Ubuntu 14.04
* Browser : Chrome
### Screenshots

### Database
[db.sqlite3.zip](https://github.com/learningequality/kolibri/files/1617728/db.sqlite3.zip)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `kolibri/logger/serializers.py`
Content:
```
1 from django.db.models import Sum
2 from django.utils.timezone import now
3 from kolibri.auth.models import FacilityUser
4 from kolibri.core.serializers import KolibriModelSerializer
5 from kolibri.logger.models import AttemptLog, ContentSessionLog, ContentSummaryLog, ExamAttemptLog, ExamLog, MasteryLog, UserSessionLog
6 from rest_framework import serializers
7
8
9 class ContentSessionLogSerializer(KolibriModelSerializer):
10
11 extra_fields = serializers.JSONField(default='{}')
12
13 class Meta:
14 model = ContentSessionLog
15 fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp',
16 'end_timestamp', 'time_spent', 'kind', 'extra_fields', 'progress')
17
18 class ExamLogSerializer(KolibriModelSerializer):
19 progress = serializers.SerializerMethodField()
20 score = serializers.SerializerMethodField()
21
22 def get_progress(self, obj):
23 return obj.attemptlogs.count()
24
25 def get_score(self, obj):
26 return obj.attemptlogs.aggregate(Sum('correct')).get('correct__sum')
27
28 class Meta:
29 model = ExamLog
30 fields = ('id', 'exam', 'user', 'closed', 'progress', 'score', 'completion_timestamp')
31 read_only_fields = ('completion_timestamp', )
32
33 def update(self, instance, validated_data):
34 # This has changed, set the completion timestamp
35 if validated_data.get('closed') and not instance.closed:
36 instance.completion_timestamp = now()
37 return super(ExamLogSerializer, self).update(instance, validated_data)
38
39 class MasteryLogSerializer(KolibriModelSerializer):
40
41 pastattempts = serializers.SerializerMethodField()
42 totalattempts = serializers.SerializerMethodField()
43 mastery_criterion = serializers.JSONField(default='{}')
44
45 class Meta:
46 model = MasteryLog
47 fields = ('id', 'summarylog', 'start_timestamp', 'pastattempts', 'totalattempts', 'user',
48 'end_timestamp', 'completion_timestamp', 'mastery_criterion', 'mastery_level', 'complete')
49
50 def get_pastattempts(self, obj):
51 # will return a list of the latest 10 correct and hint_taken fields for each attempt.
52 return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).values('correct', 'hinted').order_by('-start_timestamp')[:10]
53
54 def get_totalattempts(self, obj):
55 return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).count()
56
57 class AttemptLogSerializer(KolibriModelSerializer):
58 answer = serializers.JSONField(default='{}')
59 interaction_history = serializers.JSONField(default='[]')
60
61 class Meta:
62 model = AttemptLog
63 fields = ('id', 'masterylog', 'start_timestamp', 'sessionlog',
64 'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',
65 'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')
66
67 class ExamAttemptLogSerializer(KolibriModelSerializer):
68 answer = serializers.JSONField(default='{}', allow_null=True)
69 interaction_history = serializers.JSONField(default='[]')
70
71 class Meta:
72 model = ExamAttemptLog
73 fields = ('id', 'examlog', 'start_timestamp', 'channel_id', 'content_id',
74 'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',
75 'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')
76
77 def validate(self, data):
78 # Only do this validation when both are being set
79 # not necessary on PATCH, for example
80 if data.get('examlog') and data.get('user'):
81 try:
82 if data['examlog'].user != data['user']:
83 raise serializers.ValidationError('User field and user for related exam log are not the same')
84 except ExamLog.DoesNotExist:
85 raise serializers.ValidationError('Invalid exam log')
86 return data
87
88 class ContentSummaryLogSerializer(KolibriModelSerializer):
89
90 currentmasterylog = serializers.SerializerMethodField()
91 extra_fields = serializers.JSONField(default='{}')
92
93 class Meta:
94 model = ContentSummaryLog
95 fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp', 'currentmasterylog',
96 'end_timestamp', 'completion_timestamp', 'time_spent', 'progress', 'kind', 'extra_fields')
97
98 def get_currentmasterylog(self, obj):
99 try:
100 current_log = obj.masterylogs.latest('end_timestamp')
101 return MasteryLogSerializer(current_log).data
102 except MasteryLog.DoesNotExist:
103 return None
104
105 class UserSessionLogSerializer(KolibriModelSerializer):
106
107 class Meta:
108 model = UserSessionLog
109 fields = ('pk', 'user', 'channels', 'start_timestamp', 'last_interaction_timestamp', 'pages')
110
111 class TotalContentProgressSerializer(serializers.ModelSerializer):
112
113 progress = serializers.SerializerMethodField()
114
115 class Meta:
116 model = FacilityUser
117 fields = ('progress', 'id')
118
119 def get_progress(self, obj):
120 return obj.contentsummarylog_set.filter(progress=1).aggregate(Sum('progress')).get('progress__sum')
121
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/kolibri/logger/serializers.py b/kolibri/logger/serializers.py
--- a/kolibri/logger/serializers.py
+++ b/kolibri/logger/serializers.py
@@ -20,10 +20,10 @@
score = serializers.SerializerMethodField()
def get_progress(self, obj):
- return obj.attemptlogs.count()
+ return obj.exam.question_count
def get_score(self, obj):
- return obj.attemptlogs.aggregate(Sum('correct')).get('correct__sum')
+ return obj.attemptlogs.values_list('item').order_by('completion_timestamp').distinct().aggregate(Sum('correct')).get('correct__sum')
class Meta:
model = ExamLog
|
{"golden_diff": "diff --git a/kolibri/logger/serializers.py b/kolibri/logger/serializers.py\n--- a/kolibri/logger/serializers.py\n+++ b/kolibri/logger/serializers.py\n@@ -20,10 +20,10 @@\n score = serializers.SerializerMethodField()\n \n def get_progress(self, obj):\n- return obj.attemptlogs.count()\n+ return obj.exam.question_count\n \n def get_score(self, obj):\n- return obj.attemptlogs.aggregate(Sum('correct')).get('correct__sum')\n+ return obj.attemptlogs.values_list('item').order_by('completion_timestamp').distinct().aggregate(Sum('correct')).get('correct__sum')\n \n class Meta:\n model = ExamLog\n", "issue": "Showing 120% score in exam report\n### Observed behavior\r\nAfter submitting exam, when coach user watching progress of each user in exam report. Coach user see 120% score in exam report. We have attached screenshot and database file,so you can easily re-generate this issue.\r\n\r\n### Expected behavior\r\nScore must be between 0-100%.\r\n\r\n### Steps to reproduce\r\n1. Copy attached database file in .kolibri folder.\r\n2. login with username \"pm\" and password \"sc\".\r\n3. Click on Coach.\r\n4. Click on Class 4A.\r\n5. Click on Exams.\r\n6. See report of the Unit 2B-Final exam.\r\n7. See learner Junaid Shaikh.\r\n\r\n### Context\r\n * Kolibri version : Kolibri 0.4.9\r\n * Operating system : Ubuntu 14.04\r\n * Browser : Chrome \r\n\r\n### Screenshots\r\n\r\n\r\n### Database\r\n[db.sqlite3.zip](https://github.com/learningequality/kolibri/files/1617728/db.sqlite3.zip)\r\n\r\n\r\n\n", "before_files": [{"content": "from django.db.models import Sum\nfrom django.utils.timezone import now\nfrom kolibri.auth.models import FacilityUser\nfrom kolibri.core.serializers import KolibriModelSerializer\nfrom kolibri.logger.models import AttemptLog, ContentSessionLog, ContentSummaryLog, ExamAttemptLog, ExamLog, MasteryLog, UserSessionLog\nfrom rest_framework import serializers\n\n\nclass ContentSessionLogSerializer(KolibriModelSerializer):\n\n extra_fields = serializers.JSONField(default='{}')\n\n class Meta:\n model = ContentSessionLog\n fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp',\n 'end_timestamp', 'time_spent', 'kind', 'extra_fields', 'progress')\n\nclass ExamLogSerializer(KolibriModelSerializer):\n progress = serializers.SerializerMethodField()\n score = serializers.SerializerMethodField()\n\n def get_progress(self, obj):\n return obj.attemptlogs.count()\n\n def get_score(self, obj):\n return obj.attemptlogs.aggregate(Sum('correct')).get('correct__sum')\n\n class Meta:\n model = ExamLog\n fields = ('id', 'exam', 'user', 'closed', 'progress', 'score', 'completion_timestamp')\n read_only_fields = ('completion_timestamp', )\n\n def update(self, instance, validated_data):\n # This has changed, set the completion timestamp\n if validated_data.get('closed') and not instance.closed:\n instance.completion_timestamp = now()\n return super(ExamLogSerializer, self).update(instance, validated_data)\n\nclass MasteryLogSerializer(KolibriModelSerializer):\n\n pastattempts = serializers.SerializerMethodField()\n totalattempts = serializers.SerializerMethodField()\n mastery_criterion = serializers.JSONField(default='{}')\n\n class Meta:\n model = MasteryLog\n fields = ('id', 'summarylog', 'start_timestamp', 'pastattempts', 'totalattempts', 'user',\n 'end_timestamp', 'completion_timestamp', 'mastery_criterion', 'mastery_level', 'complete')\n\n def get_pastattempts(self, obj):\n # will return a list of the latest 10 correct and hint_taken fields for each attempt.\n return 
AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).values('correct', 'hinted').order_by('-start_timestamp')[:10]\n\n def get_totalattempts(self, obj):\n return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).count()\n\nclass AttemptLogSerializer(KolibriModelSerializer):\n answer = serializers.JSONField(default='{}')\n interaction_history = serializers.JSONField(default='[]')\n\n class Meta:\n model = AttemptLog\n fields = ('id', 'masterylog', 'start_timestamp', 'sessionlog',\n 'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',\n 'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')\n\nclass ExamAttemptLogSerializer(KolibriModelSerializer):\n answer = serializers.JSONField(default='{}', allow_null=True)\n interaction_history = serializers.JSONField(default='[]')\n\n class Meta:\n model = ExamAttemptLog\n fields = ('id', 'examlog', 'start_timestamp', 'channel_id', 'content_id',\n 'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',\n 'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')\n\n def validate(self, data):\n # Only do this validation when both are being set\n # not necessary on PATCH, for example\n if data.get('examlog') and data.get('user'):\n try:\n if data['examlog'].user != data['user']:\n raise serializers.ValidationError('User field and user for related exam log are not the same')\n except ExamLog.DoesNotExist:\n raise serializers.ValidationError('Invalid exam log')\n return data\n\nclass ContentSummaryLogSerializer(KolibriModelSerializer):\n\n currentmasterylog = serializers.SerializerMethodField()\n extra_fields = serializers.JSONField(default='{}')\n\n class Meta:\n model = ContentSummaryLog\n fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp', 'currentmasterylog',\n 'end_timestamp', 'completion_timestamp', 'time_spent', 'progress', 'kind', 'extra_fields')\n\n def get_currentmasterylog(self, obj):\n try:\n current_log = obj.masterylogs.latest('end_timestamp')\n return MasteryLogSerializer(current_log).data\n except MasteryLog.DoesNotExist:\n return None\n\nclass UserSessionLogSerializer(KolibriModelSerializer):\n\n class Meta:\n model = UserSessionLog\n fields = ('pk', 'user', 'channels', 'start_timestamp', 'last_interaction_timestamp', 'pages')\n\nclass TotalContentProgressSerializer(serializers.ModelSerializer):\n\n progress = serializers.SerializerMethodField()\n\n class Meta:\n model = FacilityUser\n fields = ('progress', 'id')\n\n def get_progress(self, obj):\n return obj.contentsummarylog_set.filter(progress=1).aggregate(Sum('progress')).get('progress__sum')\n", "path": "kolibri/logger/serializers.py"}], "after_files": [{"content": "from django.db.models import Sum\nfrom django.utils.timezone import now\nfrom kolibri.auth.models import FacilityUser\nfrom kolibri.core.serializers import KolibriModelSerializer\nfrom kolibri.logger.models import AttemptLog, ContentSessionLog, ContentSummaryLog, ExamAttemptLog, ExamLog, MasteryLog, UserSessionLog\nfrom rest_framework import serializers\n\n\nclass ContentSessionLogSerializer(KolibriModelSerializer):\n\n extra_fields = serializers.JSONField(default='{}')\n\n class Meta:\n model = ContentSessionLog\n fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp',\n 'end_timestamp', 'time_spent', 'kind', 'extra_fields', 'progress')\n\nclass ExamLogSerializer(KolibriModelSerializer):\n progress = serializers.SerializerMethodField()\n score = 
serializers.SerializerMethodField()\n\n def get_progress(self, obj):\n return obj.exam.question_count\n\n def get_score(self, obj):\n return obj.attemptlogs.values_list('item').order_by('completion_timestamp').distinct().aggregate(Sum('correct')).get('correct__sum')\n\n class Meta:\n model = ExamLog\n fields = ('id', 'exam', 'user', 'closed', 'progress', 'score', 'completion_timestamp')\n read_only_fields = ('completion_timestamp', )\n\n def update(self, instance, validated_data):\n # This has changed, set the completion timestamp\n if validated_data.get('closed') and not instance.closed:\n instance.completion_timestamp = now()\n return super(ExamLogSerializer, self).update(instance, validated_data)\n\nclass MasteryLogSerializer(KolibriModelSerializer):\n\n pastattempts = serializers.SerializerMethodField()\n totalattempts = serializers.SerializerMethodField()\n mastery_criterion = serializers.JSONField(default='{}')\n\n class Meta:\n model = MasteryLog\n fields = ('id', 'summarylog', 'start_timestamp', 'pastattempts', 'totalattempts', 'user',\n 'end_timestamp', 'completion_timestamp', 'mastery_criterion', 'mastery_level', 'complete')\n\n def get_pastattempts(self, obj):\n # will return a list of the latest 10 correct and hint_taken fields for each attempt.\n return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).values('correct', 'hinted').order_by('-start_timestamp')[:10]\n\n def get_totalattempts(self, obj):\n return AttemptLog.objects.filter(masterylog__summarylog=obj.summarylog).count()\n\nclass AttemptLogSerializer(KolibriModelSerializer):\n answer = serializers.JSONField(default='{}')\n interaction_history = serializers.JSONField(default='[]')\n\n class Meta:\n model = AttemptLog\n fields = ('id', 'masterylog', 'start_timestamp', 'sessionlog',\n 'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',\n 'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')\n\nclass ExamAttemptLogSerializer(KolibriModelSerializer):\n answer = serializers.JSONField(default='{}', allow_null=True)\n interaction_history = serializers.JSONField(default='[]')\n\n class Meta:\n model = ExamAttemptLog\n fields = ('id', 'examlog', 'start_timestamp', 'channel_id', 'content_id',\n 'end_timestamp', 'completion_timestamp', 'item', 'time_spent', 'user',\n 'complete', 'correct', 'hinted', 'answer', 'simple_answer', 'interaction_history')\n\n def validate(self, data):\n # Only do this validation when both are being set\n # not necessary on PATCH, for example\n if data.get('examlog') and data.get('user'):\n try:\n if data['examlog'].user != data['user']:\n raise serializers.ValidationError('User field and user for related exam log are not the same')\n except ExamLog.DoesNotExist:\n raise serializers.ValidationError('Invalid exam log')\n return data\n\nclass ContentSummaryLogSerializer(KolibriModelSerializer):\n\n currentmasterylog = serializers.SerializerMethodField()\n extra_fields = serializers.JSONField(default='{}')\n\n class Meta:\n model = ContentSummaryLog\n fields = ('pk', 'user', 'content_id', 'channel_id', 'start_timestamp', 'currentmasterylog',\n 'end_timestamp', 'completion_timestamp', 'time_spent', 'progress', 'kind', 'extra_fields')\n\n def get_currentmasterylog(self, obj):\n try:\n current_log = obj.masterylogs.latest('end_timestamp')\n return MasteryLogSerializer(current_log).data\n except MasteryLog.DoesNotExist:\n return None\n\nclass UserSessionLogSerializer(KolibriModelSerializer):\n\n class Meta:\n model = UserSessionLog\n fields = 
('pk', 'user', 'channels', 'start_timestamp', 'last_interaction_timestamp', 'pages')\n\nclass TotalContentProgressSerializer(serializers.ModelSerializer):\n\n progress = serializers.SerializerMethodField()\n\n class Meta:\n model = FacilityUser\n fields = ('progress', 'id')\n\n def get_progress(self, obj):\n return obj.contentsummarylog_set.filter(progress=1).aggregate(Sum('progress')).get('progress__sum')\n", "path": "kolibri/logger/serializers.py"}]}
| 1,909 | 162 |
gh_patches_debug_4446
|
rasdani/github-patches
|
git_diff
|
zenml-io__zenml-317
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Repeated Paragraph in the documentation for `core-concepts`
In the file `core-concepts.md`, the section on [`Pipeline`](https://github.com/zenml-io/zenml/blob/b94dff83f0e7c8ab29e99d6b42a0c906a3512b63/docs/book/introduction/core-concepts.md?plain=1#L27-L41) includes a repeated paragraph. The first paragraph in the pipeline section is repeated in the 3rd paragraph of the same section.
```markdown
Within your repository, you will have one or more pipelines as part of your experimentation workflow. A ZenML
pipeline is a sequence of tasks that execute in a specific order and yield artifacts. The artifacts are stored
within the artifact store and indexed via the metadata store. Each individual task within a pipeline is known as a
step. The standard pipelines within ZenML are designed to have easy interfaces to add pre-decided steps, with the
order also pre-decided. Other sorts of pipelines can be created as well from scratch.
Pipelines are designed as simple functions. They are created by using decorators appropriate to the specific use case
you have. The moment it is `run`, a pipeline is compiled and passed directly to the orchestrator, to be run in the
orchestrator environment.
Within your repository, you will have one or more pipelines as part of your experimentation workflow. A ZenML
pipeline is a sequence of tasks that execute in a specific order and yield artifacts. The artifacts are stored
within the artifact store and indexed via the metadata store. Each individual task within a pipeline is known as a
step. The standard pipelines (like `TrainingPipeline`) within ZenML are designed to have easy interfaces to add
pre-decided steps, with the order also pre-decided. Other sorts of pipelines can be created as well from scratch.
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/zenml/materializers/built_in_materializer.py`
Content:
```
1 # Copyright (c) ZenML GmbH 2021. All Rights Reserved.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at:
6 #
7 # https://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
12 # or implied. See the License for the specific language governing
13 # permissions and limitations under the License.
14 import os
15 from typing import Any, Type
16
17 from zenml.artifacts import DataAnalysisArtifact, DataArtifact
18 from zenml.logger import get_logger
19 from zenml.materializers.base_materializer import BaseMaterializer
20 from zenml.utils import yaml_utils
21
22 logger = get_logger(__name__)
23 DEFAULT_FILENAME = "data.json"
24
25
26 class BuiltInMaterializer(BaseMaterializer):
27 """Read/Write JSON files."""
28
29 # TODO [LOW]: consider adding typing.Dict and typing.List
30 # since these are the 'correct' way to annotate these types.
31
32 ASSOCIATED_ARTIFACT_TYPES = [
33 DataArtifact,
34 DataAnalysisArtifact,
35 ]
36 ASSOCIATED_TYPES = [
37 int,
38 str,
39 bytes,
40 dict,
41 float,
42 list,
43 tuple,
44 bool,
45 ]
46
47 def handle_input(self, data_type: Type[Any]) -> Any:
48 """Reads basic primitive types from json."""
49 super().handle_input(data_type)
50 filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)
51 contents = yaml_utils.read_json(filepath)
52 if type(contents) != data_type:
53 # TODO [ENG-142]: Raise error or try to coerce
54 logger.debug(
55 f"Contents {contents} was type {type(contents)} but expected "
56 f"{data_type}"
57 )
58 return contents
59
60 def handle_return(self, data: Any) -> None:
61 """Handles basic built-in types and stores them as json"""
62 super().handle_return(data)
63 filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)
64 yaml_utils.write_json(filepath, data)
65
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/zenml/materializers/built_in_materializer.py b/src/zenml/materializers/built_in_materializer.py
--- a/src/zenml/materializers/built_in_materializer.py
+++ b/src/zenml/materializers/built_in_materializer.py
@@ -26,7 +26,7 @@
class BuiltInMaterializer(BaseMaterializer):
"""Read/Write JSON files."""
- # TODO [LOW]: consider adding typing.Dict and typing.List
+ # TODO [ENG-322]: consider adding typing.Dict and typing.List
# since these are the 'correct' way to annotate these types.
ASSOCIATED_ARTIFACT_TYPES = [
|
{"golden_diff": "diff --git a/src/zenml/materializers/built_in_materializer.py b/src/zenml/materializers/built_in_materializer.py\n--- a/src/zenml/materializers/built_in_materializer.py\n+++ b/src/zenml/materializers/built_in_materializer.py\n@@ -26,7 +26,7 @@\n class BuiltInMaterializer(BaseMaterializer):\n \"\"\"Read/Write JSON files.\"\"\"\n \n- # TODO [LOW]: consider adding typing.Dict and typing.List\n+ # TODO [ENG-322]: consider adding typing.Dict and typing.List\n # since these are the 'correct' way to annotate these types.\n \n ASSOCIATED_ARTIFACT_TYPES = [\n", "issue": "Repeated Paragraph in the documentation for `core-concepts`\nIn the file `core-concepts.md`, the section on [`Pipeline`](https://github.com/zenml-io/zenml/blob/b94dff83f0e7c8ab29e99d6b42a0c906a3512b63/docs/book/introduction/core-concepts.md?plain=1#L27-L41) includes a repeated paragraph. The first paragraph in the the pipeline section is repeated in the 3rd paragraph of the same section. \r\n\r\n```markdown\r\nWithin your repository, you will have one or more pipelines as part of your experimentation workflow. A ZenML \r\npipeline is a sequence of tasks that execute in a specific order and yield artifacts. The artifacts are stored \r\nwithin the artifact store and indexed via the metadata store. Each individual task within a pipeline is known as a \r\nstep. The standard pipelines within ZenML are designed to have easy interfaces to add pre-decided steps, with the \r\norder also pre-decided. Other sorts of pipelines can be created as well from scratch.\r\n\r\nPipelines are designed as simple functions. They are created by using decorators appropriate to the specific use case \r\nyou have. The moment it is `run`, a pipeline is compiled and passed directly to the orchestrator, to be run in the \r\norchestrator environment.\r\n\r\nWithin your repository, you will have one or more pipelines as part of your experimentation workflow. A ZenML \r\npipeline is a sequence of tasks that execute in a specific order and yield artifacts. The artifacts are stored \r\nwithin the artifact store and indexed via the metadata store. Each individual task within a pipeline is known as a \r\nstep. The standard pipelines (like `TrainingPipeline`) within ZenML are designed to have easy interfaces to add \r\npre-decided steps, with the order also pre-decided. Other sorts of pipelines can be created as well from scratch.\r\n```\n", "before_files": [{"content": "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\nimport os\nfrom typing import Any, Type\n\nfrom zenml.artifacts import DataAnalysisArtifact, DataArtifact\nfrom zenml.logger import get_logger\nfrom zenml.materializers.base_materializer import BaseMaterializer\nfrom zenml.utils import yaml_utils\n\nlogger = get_logger(__name__)\nDEFAULT_FILENAME = \"data.json\"\n\n\nclass BuiltInMaterializer(BaseMaterializer):\n \"\"\"Read/Write JSON files.\"\"\"\n\n # TODO [LOW]: consider adding typing.Dict and typing.List\n # since these are the 'correct' way to annotate these types.\n\n ASSOCIATED_ARTIFACT_TYPES = [\n DataArtifact,\n DataAnalysisArtifact,\n ]\n ASSOCIATED_TYPES = [\n int,\n str,\n bytes,\n dict,\n float,\n list,\n tuple,\n bool,\n ]\n\n def handle_input(self, data_type: Type[Any]) -> Any:\n \"\"\"Reads basic primitive types from json.\"\"\"\n super().handle_input(data_type)\n filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)\n contents = yaml_utils.read_json(filepath)\n if type(contents) != data_type:\n # TODO [ENG-142]: Raise error or try to coerce\n logger.debug(\n f\"Contents {contents} was type {type(contents)} but expected \"\n f\"{data_type}\"\n )\n return contents\n\n def handle_return(self, data: Any) -> None:\n \"\"\"Handles basic built-in types and stores them as json\"\"\"\n super().handle_return(data)\n filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)\n yaml_utils.write_json(filepath, data)\n", "path": "src/zenml/materializers/built_in_materializer.py"}], "after_files": [{"content": "# Copyright (c) ZenML GmbH 2021. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at:\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express\n# or implied. 
See the License for the specific language governing\n# permissions and limitations under the License.\nimport os\nfrom typing import Any, Type\n\nfrom zenml.artifacts import DataAnalysisArtifact, DataArtifact\nfrom zenml.logger import get_logger\nfrom zenml.materializers.base_materializer import BaseMaterializer\nfrom zenml.utils import yaml_utils\n\nlogger = get_logger(__name__)\nDEFAULT_FILENAME = \"data.json\"\n\n\nclass BuiltInMaterializer(BaseMaterializer):\n \"\"\"Read/Write JSON files.\"\"\"\n\n # TODO [ENG-322]: consider adding typing.Dict and typing.List\n # since these are the 'correct' way to annotate these types.\n\n ASSOCIATED_ARTIFACT_TYPES = [\n DataArtifact,\n DataAnalysisArtifact,\n ]\n ASSOCIATED_TYPES = [\n int,\n str,\n bytes,\n dict,\n float,\n list,\n tuple,\n bool,\n ]\n\n def handle_input(self, data_type: Type[Any]) -> Any:\n \"\"\"Reads basic primitive types from json.\"\"\"\n super().handle_input(data_type)\n filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)\n contents = yaml_utils.read_json(filepath)\n if type(contents) != data_type:\n # TODO [ENG-142]: Raise error or try to coerce\n logger.debug(\n f\"Contents {contents} was type {type(contents)} but expected \"\n f\"{data_type}\"\n )\n return contents\n\n def handle_return(self, data: Any) -> None:\n \"\"\"Handles basic built-in types and stores them as json\"\"\"\n super().handle_return(data)\n filepath = os.path.join(self.artifact.uri, DEFAULT_FILENAME)\n yaml_utils.write_json(filepath, data)\n", "path": "src/zenml/materializers/built_in_materializer.py"}]}
| 1,284 | 150 |
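Aside from the TODO tag touched by the patch above, the interesting part of `BuiltInMaterializer` is the JSON round-trip: `handle_return` writes `data.json` and `handle_input` logs a debug message when the loaded value's type no longer matches the annotated one. A stand-alone sketch of that behaviour using only the standard library (the real code goes through zenml's `yaml_utils` helpers, which are assumed here to serialise these built-ins the same way `json` does):

```python
import json
import os
import tempfile

DEFAULT_FILENAME = "data.json"

def handle_return(uri: str, data) -> None:
    """Store a built-in value as JSON, mirroring BuiltInMaterializer.handle_return."""
    with open(os.path.join(uri, DEFAULT_FILENAME), "w") as f:
        json.dump(data, f)

def handle_input(uri: str, data_type: type):
    """Read the value back and flag a type mismatch, mirroring handle_input."""
    with open(os.path.join(uri, DEFAULT_FILENAME)) as f:
        contents = json.load(f)
    if type(contents) != data_type:
        print(f"Contents {contents!r} was {type(contents)} but expected {data_type}")
    return contents

uri = tempfile.mkdtemp()
handle_return(uri, (1, 2, 3))     # tuples are stored as JSON arrays...
print(handle_input(uri, tuple))   # ...and come back as a list, so the mismatch message fires
```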
gh_patches_debug_237
|
rasdani/github-patches
|
git_diff
|
keras-team__keras-2992
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Why does the TF-IDF matrix generated by keras.preprocessing.text.Tokenizer() have negative values?
Say, if you run the following script:
> > > import keras
> > > tk = keras.preprocessing.text.Tokenizer()
> > > texts = ['I love you.', 'I love you, too.']
> > > tk.fit_on_texts(texts)
> > > tk.texts_to_matrix(texts, mode='tfidf')
The output will be:
array([[ 0. , -1.09861229, -1.09861229, -1.09861229, 0. ],
[ 0. , -1.38629436, -1.38629436, -1.38629436, -1.38629436]])
But it seems tf-idf values should be non-negative?
By the way, is there a neat way to get a word by its index, or the vocabulary (in the order of word indices) of the Tokenizer() class? Say, sometimes I want to know the most frequent word in the documents, so I want to access the word with index 1.
I can do it by running:
> > > vocab = tk.word_index.items()
> > > vocab.sort(key=lambda x:x[1])
This gives:
> > > vocab
[('i', 1), ('you', 2), ('love', 3), ('too', 4)]
But is it somehow hacky?
Thank you!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `keras/preprocessing/text.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 '''These preprocessing utilities would greatly benefit
3 from a fast Cython rewrite.
4 '''
5 from __future__ import absolute_import
6
7 import string
8 import sys
9 import numpy as np
10 from six.moves import range
11 from six.moves import zip
12
13 if sys.version_info < (3,):
14 maketrans = string.maketrans
15 else:
16 maketrans = str.maketrans
17
18
19 def base_filter():
20 f = string.punctuation
21 f = f.replace("'", '')
22 f += '\t\n'
23 return f
24
25
26 def text_to_word_sequence(text, filters=base_filter(), lower=True, split=" "):
27 '''prune: sequence of characters to filter out
28 '''
29 if lower:
30 text = text.lower()
31 text = text.translate(maketrans(filters, split*len(filters)))
32 seq = text.split(split)
33 return [_f for _f in seq if _f]
34
35
36 def one_hot(text, n, filters=base_filter(), lower=True, split=" "):
37 seq = text_to_word_sequence(text, filters=filters, lower=lower, split=split)
38 return [(abs(hash(w)) % (n - 1) + 1) for w in seq]
39
40
41 class Tokenizer(object):
42 def __init__(self, nb_words=None, filters=base_filter(),
43 lower=True, split=' ', char_level=False):
44 '''The class allows to vectorize a text corpus, by turning each
45 text into either a sequence of integers (each integer being the index
46 of a token in a dictionary) or into a vector where the coefficient
47 for each token could be binary, based on word count, based on tf-idf...
48
49 # Arguments
50 nb_words: the maximum number of words to keep, based
51 on word frequency. Only the most common `nb_words` words will
52 be kept.
53 filters: a string where each element is a character that will be
54 filtered from the texts. The default is all punctuation, plus
55 tabs and line breaks, minus the `'` character.
56 lower: boolean. Whether to convert the texts to lowercase.
57 split: character or string to use for token splitting.
58 char_level: if True, every character will be treated as a word.
59
60 By default, all punctuation is removed, turning the texts into
61 space-separated sequences of words
62 (words maybe include the `'` character). These sequences are then
63 split into lists of tokens. They will then be indexed or vectorized.
64
65 `0` is a reserved index that won't be assigned to any word.
66 '''
67 self.word_counts = {}
68 self.word_docs = {}
69 self.filters = filters
70 self.split = split
71 self.lower = lower
72 self.nb_words = nb_words
73 self.document_count = 0
74 self.char_level = char_level
75
76 def fit_on_texts(self, texts):
77 '''Required before using texts_to_sequences or texts_to_matrix
78
79 # Arguments
80 texts: can be a list of strings,
81 or a generator of strings (for memory-efficiency)
82 '''
83 self.document_count = 0
84 for text in texts:
85 self.document_count += 1
86 seq = text if self.char_level else text_to_word_sequence(text, self.filters, self.lower, self.split)
87 for w in seq:
88 if w in self.word_counts:
89 self.word_counts[w] += 1
90 else:
91 self.word_counts[w] = 1
92 for w in set(seq):
93 if w in self.word_docs:
94 self.word_docs[w] += 1
95 else:
96 self.word_docs[w] = 1
97
98 wcounts = list(self.word_counts.items())
99 wcounts.sort(key=lambda x: x[1], reverse=True)
100 sorted_voc = [wc[0] for wc in wcounts]
101 self.word_index = dict(list(zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))))
102
103 self.index_docs = {}
104 for w, c in list(self.word_docs.items()):
105 self.index_docs[self.word_index[w]] = c
106
107 def fit_on_sequences(self, sequences):
108 '''Required before using sequences_to_matrix
109 (if fit_on_texts was never called)
110 '''
111 self.document_count = len(sequences)
112 self.index_docs = {}
113 for seq in sequences:
114 seq = set(seq)
115 for i in seq:
116 if i not in self.index_docs:
117 self.index_docs[i] = 1
118 else:
119 self.index_docs[i] += 1
120
121 def texts_to_sequences(self, texts):
122 '''Transforms each text in texts in a sequence of integers.
123 Only top "nb_words" most frequent words will be taken into account.
124 Only words known by the tokenizer will be taken into account.
125
126 Returns a list of sequences.
127 '''
128 res = []
129 for vect in self.texts_to_sequences_generator(texts):
130 res.append(vect)
131 return res
132
133 def texts_to_sequences_generator(self, texts):
134 '''Transforms each text in texts in a sequence of integers.
135 Only top "nb_words" most frequent words will be taken into account.
136 Only words known by the tokenizer will be taken into account.
137
138 Yields individual sequences.
139
140 # Arguments:
141 texts: list of strings.
142 '''
143 nb_words = self.nb_words
144 for text in texts:
145 seq = text if self.char_level else text_to_word_sequence(text, self.filters, self.lower, self.split)
146 vect = []
147 for w in seq:
148 i = self.word_index.get(w)
149 if i is not None:
150 if nb_words and i >= nb_words:
151 continue
152 else:
153 vect.append(i)
154 yield vect
155
156 def texts_to_matrix(self, texts, mode='binary'):
157 '''Convert a list of texts to a Numpy matrix,
158 according to some vectorization mode.
159
160 # Arguments:
161 texts: list of strings.
162 modes: one of "binary", "count", "tfidf", "freq"
163 '''
164 sequences = self.texts_to_sequences(texts)
165 return self.sequences_to_matrix(sequences, mode=mode)
166
167 def sequences_to_matrix(self, sequences, mode='binary'):
168 '''Converts a list of sequences into a Numpy matrix,
169 according to some vectorization mode.
170
171 # Arguments:
172 sequences: list of sequences
173 (a sequence is a list of integer word indices).
174 modes: one of "binary", "count", "tfidf", "freq"
175 '''
176 if not self.nb_words:
177 if self.word_index:
178 nb_words = len(self.word_index) + 1
179 else:
180 raise Exception('Specify a dimension (nb_words argument), '
181 'or fit on some text data first.')
182 else:
183 nb_words = self.nb_words
184
185 if mode == 'tfidf' and not self.document_count:
186 raise Exception('Fit the Tokenizer on some data '
187 'before using tfidf mode.')
188
189 X = np.zeros((len(sequences), nb_words))
190 for i, seq in enumerate(sequences):
191 if not seq:
192 continue
193 counts = {}
194 for j in seq:
195 if j >= nb_words:
196 continue
197 if j not in counts:
198 counts[j] = 1.
199 else:
200 counts[j] += 1
201 for j, c in list(counts.items()):
202 if mode == 'count':
203 X[i][j] = c
204 elif mode == 'freq':
205 X[i][j] = c / len(seq)
206 elif mode == 'binary':
207 X[i][j] = 1
208 elif mode == 'tfidf':
209 # Use weighting scheme 2 in
210 # https://en.wikipedia.org/wiki/Tf%E2%80%93idf
211 tf = 1 + np.log(c)
212 idf = np.log(1 + self.document_count / (1 + self.index_docs.get(j, 0)))
213 X[i][j] = tf * idf
214 else:
215 raise Exception('Unknown vectorization mode: ' + str(mode))
216 return X
217
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/keras/preprocessing/text.py b/keras/preprocessing/text.py
--- a/keras/preprocessing/text.py
+++ b/keras/preprocessing/text.py
@@ -3,6 +3,7 @@
from a fast Cython rewrite.
'''
from __future__ import absolute_import
+from __future__ import division
import string
import sys
|
{"golden_diff": "diff --git a/keras/preprocessing/text.py b/keras/preprocessing/text.py\n--- a/keras/preprocessing/text.py\n+++ b/keras/preprocessing/text.py\n@@ -3,6 +3,7 @@\n from a fast Cython rewrite.\n '''\n from __future__ import absolute_import\n+from __future__ import division\n \n import string\n import sys\n", "issue": "Why TF-IDF matrix generated by keras.preprocessing.text.Tokenizer() has negative values?\nSay, if run the following script:\n\n> > > import keras\n> > > tk = keras.preprocessing.text.Tokenizer()\n> > > texts = ['I love you.', 'I love you, too.']\n> > > tk.fit_on_texts(texts)\n> > > tk.texts_to_matrix(texts, mode='tfidf')\n\nThe output will be:\narray([[ 0. , -1.09861229, -1.09861229, -1.09861229, 0. ],\n [ 0. , -1.38629436, -1.38629436, -1.38629436, -1.38629436]])\n\nBut tf-idf values seems should be non-negative?\n\nBy the way, is there a neat way to get the word by its index, or the vocabulary (in the order of word indices) of the Tokenizer() class? Say, sometimes I want to know what's the most frequent word in the documents, then I want to access word with index 1.\n\nI can do it by running:\n\n> > > vocab = tk.word_index.items()\n> > > vocab.sort(key=lambda x:x[1])\n\nThis gives:\n\n> > > vocab\n\n[('i', 1), ('you', 2), ('love', 3), ('too', 4)]\nBut is it somehow hacky?\n\nThank you!\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n'''These preprocessing utilities would greatly benefit\nfrom a fast Cython rewrite.\n'''\nfrom __future__ import absolute_import\n\nimport string\nimport sys\nimport numpy as np\nfrom six.moves import range\nfrom six.moves import zip\n\nif sys.version_info < (3,):\n maketrans = string.maketrans\nelse:\n maketrans = str.maketrans\n\n\ndef base_filter():\n f = string.punctuation\n f = f.replace(\"'\", '')\n f += '\\t\\n'\n return f\n\n\ndef text_to_word_sequence(text, filters=base_filter(), lower=True, split=\" \"):\n '''prune: sequence of characters to filter out\n '''\n if lower:\n text = text.lower()\n text = text.translate(maketrans(filters, split*len(filters)))\n seq = text.split(split)\n return [_f for _f in seq if _f]\n\n\ndef one_hot(text, n, filters=base_filter(), lower=True, split=\" \"):\n seq = text_to_word_sequence(text, filters=filters, lower=lower, split=split)\n return [(abs(hash(w)) % (n - 1) + 1) for w in seq]\n\n\nclass Tokenizer(object):\n def __init__(self, nb_words=None, filters=base_filter(),\n lower=True, split=' ', char_level=False):\n '''The class allows to vectorize a text corpus, by turning each\n text into either a sequence of integers (each integer being the index\n of a token in a dictionary) or into a vector where the coefficient\n for each token could be binary, based on word count, based on tf-idf...\n\n # Arguments\n nb_words: the maximum number of words to keep, based\n on word frequency. Only the most common `nb_words` words will\n be kept.\n filters: a string where each element is a character that will be\n filtered from the texts. The default is all punctuation, plus\n tabs and line breaks, minus the `'` character.\n lower: boolean. Whether to convert the texts to lowercase.\n split: character or string to use for token splitting.\n char_level: if True, every character will be treated as a word.\n\n By default, all punctuation is removed, turning the texts into\n space-separated sequences of words\n (words maybe include the `'` character). These sequences are then\n split into lists of tokens. 
They will then be indexed or vectorized.\n\n `0` is a reserved index that won't be assigned to any word.\n '''\n self.word_counts = {}\n self.word_docs = {}\n self.filters = filters\n self.split = split\n self.lower = lower\n self.nb_words = nb_words\n self.document_count = 0\n self.char_level = char_level\n\n def fit_on_texts(self, texts):\n '''Required before using texts_to_sequences or texts_to_matrix\n\n # Arguments\n texts: can be a list of strings,\n or a generator of strings (for memory-efficiency)\n '''\n self.document_count = 0\n for text in texts:\n self.document_count += 1\n seq = text if self.char_level else text_to_word_sequence(text, self.filters, self.lower, self.split)\n for w in seq:\n if w in self.word_counts:\n self.word_counts[w] += 1\n else:\n self.word_counts[w] = 1\n for w in set(seq):\n if w in self.word_docs:\n self.word_docs[w] += 1\n else:\n self.word_docs[w] = 1\n\n wcounts = list(self.word_counts.items())\n wcounts.sort(key=lambda x: x[1], reverse=True)\n sorted_voc = [wc[0] for wc in wcounts]\n self.word_index = dict(list(zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))))\n\n self.index_docs = {}\n for w, c in list(self.word_docs.items()):\n self.index_docs[self.word_index[w]] = c\n\n def fit_on_sequences(self, sequences):\n '''Required before using sequences_to_matrix\n (if fit_on_texts was never called)\n '''\n self.document_count = len(sequences)\n self.index_docs = {}\n for seq in sequences:\n seq = set(seq)\n for i in seq:\n if i not in self.index_docs:\n self.index_docs[i] = 1\n else:\n self.index_docs[i] += 1\n\n def texts_to_sequences(self, texts):\n '''Transforms each text in texts in a sequence of integers.\n Only top \"nb_words\" most frequent words will be taken into account.\n Only words known by the tokenizer will be taken into account.\n\n Returns a list of sequences.\n '''\n res = []\n for vect in self.texts_to_sequences_generator(texts):\n res.append(vect)\n return res\n\n def texts_to_sequences_generator(self, texts):\n '''Transforms each text in texts in a sequence of integers.\n Only top \"nb_words\" most frequent words will be taken into account.\n Only words known by the tokenizer will be taken into account.\n\n Yields individual sequences.\n\n # Arguments:\n texts: list of strings.\n '''\n nb_words = self.nb_words\n for text in texts:\n seq = text if self.char_level else text_to_word_sequence(text, self.filters, self.lower, self.split)\n vect = []\n for w in seq:\n i = self.word_index.get(w)\n if i is not None:\n if nb_words and i >= nb_words:\n continue\n else:\n vect.append(i)\n yield vect\n\n def texts_to_matrix(self, texts, mode='binary'):\n '''Convert a list of texts to a Numpy matrix,\n according to some vectorization mode.\n\n # Arguments:\n texts: list of strings.\n modes: one of \"binary\", \"count\", \"tfidf\", \"freq\"\n '''\n sequences = self.texts_to_sequences(texts)\n return self.sequences_to_matrix(sequences, mode=mode)\n\n def sequences_to_matrix(self, sequences, mode='binary'):\n '''Converts a list of sequences into a Numpy matrix,\n according to some vectorization mode.\n\n # Arguments:\n sequences: list of sequences\n (a sequence is a list of integer word indices).\n modes: one of \"binary\", \"count\", \"tfidf\", \"freq\"\n '''\n if not self.nb_words:\n if self.word_index:\n nb_words = len(self.word_index) + 1\n else:\n raise Exception('Specify a dimension (nb_words argument), '\n 'or fit on some text data first.')\n else:\n nb_words = self.nb_words\n\n if mode == 'tfidf' and not self.document_count:\n raise 
Exception('Fit the Tokenizer on some data '\n 'before using tfidf mode.')\n\n X = np.zeros((len(sequences), nb_words))\n for i, seq in enumerate(sequences):\n if not seq:\n continue\n counts = {}\n for j in seq:\n if j >= nb_words:\n continue\n if j not in counts:\n counts[j] = 1.\n else:\n counts[j] += 1\n for j, c in list(counts.items()):\n if mode == 'count':\n X[i][j] = c\n elif mode == 'freq':\n X[i][j] = c / len(seq)\n elif mode == 'binary':\n X[i][j] = 1\n elif mode == 'tfidf':\n # Use weighting scheme 2 in\n # https://en.wikipedia.org/wiki/Tf%E2%80%93idf\n tf = 1 + np.log(c)\n idf = np.log(1 + self.document_count / (1 + self.index_docs.get(j, 0)))\n X[i][j] = tf * idf\n else:\n raise Exception('Unknown vectorization mode: ' + str(mode))\n return X\n", "path": "keras/preprocessing/text.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n'''These preprocessing utilities would greatly benefit\nfrom a fast Cython rewrite.\n'''\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport string\nimport sys\nimport numpy as np\nfrom six.moves import range\nfrom six.moves import zip\n\nif sys.version_info < (3,):\n maketrans = string.maketrans\nelse:\n maketrans = str.maketrans\n\n\ndef base_filter():\n f = string.punctuation\n f = f.replace(\"'\", '')\n f += '\\t\\n'\n return f\n\n\ndef text_to_word_sequence(text, filters=base_filter(), lower=True, split=\" \"):\n '''prune: sequence of characters to filter out\n '''\n if lower:\n text = text.lower()\n text = text.translate(maketrans(filters, split*len(filters)))\n seq = text.split(split)\n return [_f for _f in seq if _f]\n\n\ndef one_hot(text, n, filters=base_filter(), lower=True, split=\" \"):\n seq = text_to_word_sequence(text, filters=filters, lower=lower, split=split)\n return [(abs(hash(w)) % (n - 1) + 1) for w in seq]\n\n\nclass Tokenizer(object):\n def __init__(self, nb_words=None, filters=base_filter(),\n lower=True, split=' ', char_level=False):\n '''The class allows to vectorize a text corpus, by turning each\n text into either a sequence of integers (each integer being the index\n of a token in a dictionary) or into a vector where the coefficient\n for each token could be binary, based on word count, based on tf-idf...\n\n # Arguments\n nb_words: the maximum number of words to keep, based\n on word frequency. Only the most common `nb_words` words will\n be kept.\n filters: a string where each element is a character that will be\n filtered from the texts. The default is all punctuation, plus\n tabs and line breaks, minus the `'` character.\n lower: boolean. Whether to convert the texts to lowercase.\n split: character or string to use for token splitting.\n char_level: if True, every character will be treated as a word.\n\n By default, all punctuation is removed, turning the texts into\n space-separated sequences of words\n (words maybe include the `'` character). These sequences are then\n split into lists of tokens. 
They will then be indexed or vectorized.\n\n `0` is a reserved index that won't be assigned to any word.\n '''\n self.word_counts = {}\n self.word_docs = {}\n self.filters = filters\n self.split = split\n self.lower = lower\n self.nb_words = nb_words\n self.document_count = 0\n self.char_level = char_level\n\n def fit_on_texts(self, texts):\n '''Required before using texts_to_sequences or texts_to_matrix\n\n # Arguments\n texts: can be a list of strings,\n or a generator of strings (for memory-efficiency)\n '''\n self.document_count = 0\n for text in texts:\n self.document_count += 1\n seq = text if self.char_level else text_to_word_sequence(text, self.filters, self.lower, self.split)\n for w in seq:\n if w in self.word_counts:\n self.word_counts[w] += 1\n else:\n self.word_counts[w] = 1\n for w in set(seq):\n if w in self.word_docs:\n self.word_docs[w] += 1\n else:\n self.word_docs[w] = 1\n\n wcounts = list(self.word_counts.items())\n wcounts.sort(key=lambda x: x[1], reverse=True)\n sorted_voc = [wc[0] for wc in wcounts]\n self.word_index = dict(list(zip(sorted_voc, list(range(1, len(sorted_voc) + 1)))))\n\n self.index_docs = {}\n for w, c in list(self.word_docs.items()):\n self.index_docs[self.word_index[w]] = c\n\n def fit_on_sequences(self, sequences):\n '''Required before using sequences_to_matrix\n (if fit_on_texts was never called)\n '''\n self.document_count = len(sequences)\n self.index_docs = {}\n for seq in sequences:\n seq = set(seq)\n for i in seq:\n if i not in self.index_docs:\n self.index_docs[i] = 1\n else:\n self.index_docs[i] += 1\n\n def texts_to_sequences(self, texts):\n '''Transforms each text in texts in a sequence of integers.\n Only top \"nb_words\" most frequent words will be taken into account.\n Only words known by the tokenizer will be taken into account.\n\n Returns a list of sequences.\n '''\n res = []\n for vect in self.texts_to_sequences_generator(texts):\n res.append(vect)\n return res\n\n def texts_to_sequences_generator(self, texts):\n '''Transforms each text in texts in a sequence of integers.\n Only top \"nb_words\" most frequent words will be taken into account.\n Only words known by the tokenizer will be taken into account.\n\n Yields individual sequences.\n\n # Arguments:\n texts: list of strings.\n '''\n nb_words = self.nb_words\n for text in texts:\n seq = text if self.char_level else text_to_word_sequence(text, self.filters, self.lower, self.split)\n vect = []\n for w in seq:\n i = self.word_index.get(w)\n if i is not None:\n if nb_words and i >= nb_words:\n continue\n else:\n vect.append(i)\n yield vect\n\n def texts_to_matrix(self, texts, mode='binary'):\n '''Convert a list of texts to a Numpy matrix,\n according to some vectorization mode.\n\n # Arguments:\n texts: list of strings.\n modes: one of \"binary\", \"count\", \"tfidf\", \"freq\"\n '''\n sequences = self.texts_to_sequences(texts)\n return self.sequences_to_matrix(sequences, mode=mode)\n\n def sequences_to_matrix(self, sequences, mode='binary'):\n '''Converts a list of sequences into a Numpy matrix,\n according to some vectorization mode.\n\n # Arguments:\n sequences: list of sequences\n (a sequence is a list of integer word indices).\n modes: one of \"binary\", \"count\", \"tfidf\", \"freq\"\n '''\n if not self.nb_words:\n if self.word_index:\n nb_words = len(self.word_index) + 1\n else:\n raise Exception('Specify a dimension (nb_words argument), '\n 'or fit on some text data first.')\n else:\n nb_words = self.nb_words\n\n if mode == 'tfidf' and not self.document_count:\n raise 
Exception('Fit the Tokenizer on some data '\n 'before using tfidf mode.')\n\n X = np.zeros((len(sequences), nb_words))\n for i, seq in enumerate(sequences):\n if not seq:\n continue\n counts = {}\n for j in seq:\n if j >= nb_words:\n continue\n if j not in counts:\n counts[j] = 1.\n else:\n counts[j] += 1\n for j, c in list(counts.items()):\n if mode == 'count':\n X[i][j] = c\n elif mode == 'freq':\n X[i][j] = c / len(seq)\n elif mode == 'binary':\n X[i][j] = 1\n elif mode == 'tfidf':\n # Use weighting scheme 2 in\n # https://en.wikipedia.org/wiki/Tf%E2%80%93idf\n tf = 1 + np.log(c)\n idf = np.log(1 + self.document_count / (1 + self.index_docs.get(j, 0)))\n X[i][j] = tf * idf\n else:\n raise Exception('Unknown vectorization mode: ' + str(mode))\n return X\n", "path": "keras/preprocessing/text.py"}]}
| 2,899 | 80 |
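The patch above only adds `from __future__ import division`, so the pitfall it guards against is Python 2 floor division inside the idf term of `sequences_to_matrix`. A minimal sketch of that pitfall, using the two-document corpus from the issue (treating this as the cause of the exact numbers the reporter saw is an assumption, since those came from whatever Keras build they had installed):

```python
import numpy as np

document_count = 2   # two texts in the corpus
doc_freq = 2         # "i", "love" and "you" each occur in both documents
tf = 1 + np.log(1)   # each word occurs once per text, so tf == 1.0

# Python 2 without ``from __future__ import division`` floors the ratio:
idf_floored = np.log(1 + document_count // (1 + doc_freq))  # log(1 + 0) == 0.0

# True division, which the patch enables for the whole module:
idf_true = np.log(1 + document_count / (1 + doc_freq))      # log(1.666...) ~= 0.51

print(tf * idf_floored, tf * idf_true)  # 0.0 vs. ~0.51
```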
gh_patches_debug_7770
|
rasdani/github-patches
|
git_diff
|
pandas-dev__pandas-8238
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
BUG: rolling_window yields unexpected results with win_type='triang'
Here's the example in the documentation, modified to have non-zero mean:
```
n = 100
ser = pandas.Series(randn(n)+10, index=pandas.date_range('1/1/2000', periods=n))
pandas.rolling_window(ser, 5, 'triang').plot()
pandas.rolling_window(ser, 5, 'boxcar').plot()
```
The rolling boxcar window is centered around 10, as expected.
The triang window is centered around 6. That suggests that the weights in the window don't add up to 1.
Either that or my expectation of how it should work is wrong?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `pandas/util/print_versions.py`
Content:
```
1 import os
2 import platform
3 import sys
4 import struct
5 import subprocess
6 import codecs
7
8
9 def get_sys_info():
10 "Returns system information as a dict"
11
12 blob = []
13
14 # get full commit hash
15 commit = None
16 if os.path.isdir(".git") and os.path.isdir("pandas"):
17 try:
18 pipe = subprocess.Popen('git log --format="%H" -n 1'.split(" "),
19 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
20 so, serr = pipe.communicate()
21 except:
22 pass
23 else:
24 if pipe.returncode == 0:
25 commit = so
26 try:
27 commit = so.decode('utf-8')
28 except ValueError:
29 pass
30 commit = commit.strip().strip('"')
31
32 blob.append(('commit', commit))
33
34 try:
35 sysname, nodename, release, version, machine, processor = platform.uname(
36 )
37 blob.extend([
38 ("python", "%d.%d.%d.%s.%s" % sys.version_info[:]),
39 ("python-bits", struct.calcsize("P") * 8),
40 ("OS", "%s" % (sysname)),
41 ("OS-release", "%s" % (release)),
42 # ("Version", "%s" % (version)),
43 ("machine", "%s" % (machine)),
44 ("processor", "%s" % (processor)),
45 ("byteorder", "%s" % sys.byteorder),
46 ("LC_ALL", "%s" % os.environ.get('LC_ALL', "None")),
47 ("LANG", "%s" % os.environ.get('LANG', "None")),
48
49 ])
50 except:
51 pass
52
53 return blob
54
55
56 def show_versions(as_json=False):
57 import imp
58 sys_info = get_sys_info()
59
60 deps = [
61 # (MODULE_NAME, f(mod) -> mod version)
62 ("pandas", lambda mod: mod.__version__),
63 ("nose", lambda mod: mod.__version__),
64 ("Cython", lambda mod: mod.__version__),
65 ("numpy", lambda mod: mod.version.version),
66 ("scipy", lambda mod: mod.version.version),
67 ("statsmodels", lambda mod: mod.__version__),
68 ("IPython", lambda mod: mod.__version__),
69 ("sphinx", lambda mod: mod.__version__),
70 ("patsy", lambda mod: mod.__version__),
71 ("scikits.timeseries", lambda mod: mod.__version__),
72 ("dateutil", lambda mod: mod.__version__),
73 ("pytz", lambda mod: mod.VERSION),
74 ("bottleneck", lambda mod: mod.__version__),
75 ("tables", lambda mod: mod.__version__),
76 ("numexpr", lambda mod: mod.__version__),
77 ("matplotlib", lambda mod: mod.__version__),
78 ("openpyxl", lambda mod: mod.__version__),
79 ("xlrd", lambda mod: mod.__VERSION__),
80 ("xlwt", lambda mod: mod.__VERSION__),
81 ("xlsxwriter", lambda mod: mod.__version__),
82 ("lxml", lambda mod: mod.etree.__version__),
83 ("bs4", lambda mod: mod.__version__),
84 ("html5lib", lambda mod: mod.__version__),
85 ("httplib2", lambda mod: mod.__version__),
86 ("apiclient", lambda mod: mod.__version__),
87 ("rpy2", lambda mod: mod.__version__),
88 ("sqlalchemy", lambda mod: mod.__version__),
89 ("pymysql", lambda mod: mod.__version__),
90 ("psycopg2", lambda mod: mod.__version__),
91 ]
92
93 deps_blob = list()
94 for (modname, ver_f) in deps:
95 try:
96 try:
97 mod = imp.load_module(modname, *imp.find_module(modname))
98 except (ImportError):
99 import importlib
100 mod = importlib.import_module(modname)
101 ver = ver_f(mod)
102 deps_blob.append((modname, ver))
103 except:
104 deps_blob.append((modname, None))
105
106 if (as_json):
107 # 2.6-safe
108 try:
109 import json
110 except:
111 import simplejson as json
112
113 j = dict(system=dict(sys_info), dependencies=dict(deps_blob))
114
115 if as_json == True:
116 print(j)
117 else:
118 with codecs.open(as_json, "wb", encoding='utf8') as f:
119 json.dump(j, f, indent=2)
120
121 else:
122
123 print("\nINSTALLED VERSIONS")
124 print("------------------")
125
126 for k, stat in sys_info:
127 print("%s: %s" % (k, stat))
128
129 print("")
130 for k, stat in deps_blob:
131 print("%s: %s" % (k, stat))
132
133
134 def main():
135 # optparse is 2.6-safe
136 from optparse import OptionParser
137 parser = OptionParser()
138 parser.add_option("-j", "--json", metavar="FILE", nargs=1,
139 help="Save output as JSON into file, pass in '-' to output to stdout")
140
141 (options, args) = parser.parse_args()
142
143 if options.json == "-":
144 options.json = True
145
146 show_versions(as_json=options.json)
147
148 return 0
149
150 if __name__ == "__main__":
151 sys.exit(main())
152
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/pandas/util/print_versions.py b/pandas/util/print_versions.py
--- a/pandas/util/print_versions.py
+++ b/pandas/util/print_versions.py
@@ -68,7 +68,6 @@
("IPython", lambda mod: mod.__version__),
("sphinx", lambda mod: mod.__version__),
("patsy", lambda mod: mod.__version__),
- ("scikits.timeseries", lambda mod: mod.__version__),
("dateutil", lambda mod: mod.__version__),
("pytz", lambda mod: mod.VERSION),
("bottleneck", lambda mod: mod.__version__),
|
{"golden_diff": "diff --git a/pandas/util/print_versions.py b/pandas/util/print_versions.py\n--- a/pandas/util/print_versions.py\n+++ b/pandas/util/print_versions.py\n@@ -68,7 +68,6 @@\n (\"IPython\", lambda mod: mod.__version__),\n (\"sphinx\", lambda mod: mod.__version__),\n (\"patsy\", lambda mod: mod.__version__),\n- (\"scikits.timeseries\", lambda mod: mod.__version__),\n (\"dateutil\", lambda mod: mod.__version__),\n (\"pytz\", lambda mod: mod.VERSION),\n (\"bottleneck\", lambda mod: mod.__version__),\n", "issue": "BUG: rolling_window yields unexpected results with win_type='triang'\nHere's the example in the documentation, modified to have non-zero mean:\n\n```\nn = 100\nser = pandas.Series(randn(n)+10, index=pandas.date_range('1/1/2000', periods=n))\npandas.rolling_window(ser, 5, 'triang').plot()\npandas.rolling_window(ser, 5, 'boxcar').plot()\n```\n\nThe rolling boxcar window is centered around 10, as expected.\n\nThe triang window is centered around 6. That suggests that the weights in the window don't add up to 1.\n\nEither that or my expectation of how it should work is wrong?\n\n", "before_files": [{"content": "import os\nimport platform\nimport sys\nimport struct\nimport subprocess\nimport codecs\n\n\ndef get_sys_info():\n \"Returns system information as a dict\"\n\n blob = []\n\n # get full commit hash\n commit = None\n if os.path.isdir(\".git\") and os.path.isdir(\"pandas\"):\n try:\n pipe = subprocess.Popen('git log --format=\"%H\" -n 1'.split(\" \"),\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n so, serr = pipe.communicate()\n except:\n pass\n else:\n if pipe.returncode == 0:\n commit = so\n try:\n commit = so.decode('utf-8')\n except ValueError:\n pass\n commit = commit.strip().strip('\"')\n\n blob.append(('commit', commit))\n\n try:\n sysname, nodename, release, version, machine, processor = platform.uname(\n )\n blob.extend([\n (\"python\", \"%d.%d.%d.%s.%s\" % sys.version_info[:]),\n (\"python-bits\", struct.calcsize(\"P\") * 8),\n (\"OS\", \"%s\" % (sysname)),\n (\"OS-release\", \"%s\" % (release)),\n # (\"Version\", \"%s\" % (version)),\n (\"machine\", \"%s\" % (machine)),\n (\"processor\", \"%s\" % (processor)),\n (\"byteorder\", \"%s\" % sys.byteorder),\n (\"LC_ALL\", \"%s\" % os.environ.get('LC_ALL', \"None\")),\n (\"LANG\", \"%s\" % os.environ.get('LANG', \"None\")),\n\n ])\n except:\n pass\n\n return blob\n\n\ndef show_versions(as_json=False):\n import imp\n sys_info = get_sys_info()\n\n deps = [\n # (MODULE_NAME, f(mod) -> mod version)\n (\"pandas\", lambda mod: mod.__version__),\n (\"nose\", lambda mod: mod.__version__),\n (\"Cython\", lambda mod: mod.__version__),\n (\"numpy\", lambda mod: mod.version.version),\n (\"scipy\", lambda mod: mod.version.version),\n (\"statsmodels\", lambda mod: mod.__version__),\n (\"IPython\", lambda mod: mod.__version__),\n (\"sphinx\", lambda mod: mod.__version__),\n (\"patsy\", lambda mod: mod.__version__),\n (\"scikits.timeseries\", lambda mod: mod.__version__),\n (\"dateutil\", lambda mod: mod.__version__),\n (\"pytz\", lambda mod: mod.VERSION),\n (\"bottleneck\", lambda mod: mod.__version__),\n (\"tables\", lambda mod: mod.__version__),\n (\"numexpr\", lambda mod: mod.__version__),\n (\"matplotlib\", lambda mod: mod.__version__),\n (\"openpyxl\", lambda mod: mod.__version__),\n (\"xlrd\", lambda mod: mod.__VERSION__),\n (\"xlwt\", lambda mod: mod.__VERSION__),\n (\"xlsxwriter\", lambda mod: mod.__version__),\n (\"lxml\", lambda mod: mod.etree.__version__),\n (\"bs4\", lambda mod: mod.__version__),\n (\"html5lib\", 
lambda mod: mod.__version__),\n (\"httplib2\", lambda mod: mod.__version__),\n (\"apiclient\", lambda mod: mod.__version__),\n (\"rpy2\", lambda mod: mod.__version__),\n (\"sqlalchemy\", lambda mod: mod.__version__),\n (\"pymysql\", lambda mod: mod.__version__),\n (\"psycopg2\", lambda mod: mod.__version__),\n ]\n\n deps_blob = list()\n for (modname, ver_f) in deps:\n try:\n try:\n mod = imp.load_module(modname, *imp.find_module(modname))\n except (ImportError):\n import importlib\n mod = importlib.import_module(modname)\n ver = ver_f(mod)\n deps_blob.append((modname, ver))\n except:\n deps_blob.append((modname, None))\n\n if (as_json):\n # 2.6-safe\n try:\n import json\n except:\n import simplejson as json\n\n j = dict(system=dict(sys_info), dependencies=dict(deps_blob))\n\n if as_json == True:\n print(j)\n else:\n with codecs.open(as_json, \"wb\", encoding='utf8') as f:\n json.dump(j, f, indent=2)\n\n else:\n\n print(\"\\nINSTALLED VERSIONS\")\n print(\"------------------\")\n\n for k, stat in sys_info:\n print(\"%s: %s\" % (k, stat))\n\n print(\"\")\n for k, stat in deps_blob:\n print(\"%s: %s\" % (k, stat))\n\n\ndef main():\n # optparse is 2.6-safe\n from optparse import OptionParser\n parser = OptionParser()\n parser.add_option(\"-j\", \"--json\", metavar=\"FILE\", nargs=1,\n help=\"Save output as JSON into file, pass in '-' to output to stdout\")\n\n (options, args) = parser.parse_args()\n\n if options.json == \"-\":\n options.json = True\n\n show_versions(as_json=options.json)\n\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "path": "pandas/util/print_versions.py"}], "after_files": [{"content": "import os\nimport platform\nimport sys\nimport struct\nimport subprocess\nimport codecs\n\n\ndef get_sys_info():\n \"Returns system information as a dict\"\n\n blob = []\n\n # get full commit hash\n commit = None\n if os.path.isdir(\".git\") and os.path.isdir(\"pandas\"):\n try:\n pipe = subprocess.Popen('git log --format=\"%H\" -n 1'.split(\" \"),\n stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n so, serr = pipe.communicate()\n except:\n pass\n else:\n if pipe.returncode == 0:\n commit = so\n try:\n commit = so.decode('utf-8')\n except ValueError:\n pass\n commit = commit.strip().strip('\"')\n\n blob.append(('commit', commit))\n\n try:\n sysname, nodename, release, version, machine, processor = platform.uname(\n )\n blob.extend([\n (\"python\", \"%d.%d.%d.%s.%s\" % sys.version_info[:]),\n (\"python-bits\", struct.calcsize(\"P\") * 8),\n (\"OS\", \"%s\" % (sysname)),\n (\"OS-release\", \"%s\" % (release)),\n # (\"Version\", \"%s\" % (version)),\n (\"machine\", \"%s\" % (machine)),\n (\"processor\", \"%s\" % (processor)),\n (\"byteorder\", \"%s\" % sys.byteorder),\n (\"LC_ALL\", \"%s\" % os.environ.get('LC_ALL', \"None\")),\n (\"LANG\", \"%s\" % os.environ.get('LANG', \"None\")),\n\n ])\n except:\n pass\n\n return blob\n\n\ndef show_versions(as_json=False):\n import imp\n sys_info = get_sys_info()\n\n deps = [\n # (MODULE_NAME, f(mod) -> mod version)\n (\"pandas\", lambda mod: mod.__version__),\n (\"nose\", lambda mod: mod.__version__),\n (\"Cython\", lambda mod: mod.__version__),\n (\"numpy\", lambda mod: mod.version.version),\n (\"scipy\", lambda mod: mod.version.version),\n (\"statsmodels\", lambda mod: mod.__version__),\n (\"IPython\", lambda mod: mod.__version__),\n (\"sphinx\", lambda mod: mod.__version__),\n (\"patsy\", lambda mod: mod.__version__),\n (\"dateutil\", lambda mod: mod.__version__),\n (\"pytz\", lambda mod: mod.VERSION),\n (\"bottleneck\", lambda mod: 
mod.__version__),\n (\"tables\", lambda mod: mod.__version__),\n (\"numexpr\", lambda mod: mod.__version__),\n (\"matplotlib\", lambda mod: mod.__version__),\n (\"openpyxl\", lambda mod: mod.__version__),\n (\"xlrd\", lambda mod: mod.__VERSION__),\n (\"xlwt\", lambda mod: mod.__VERSION__),\n (\"xlsxwriter\", lambda mod: mod.__version__),\n (\"lxml\", lambda mod: mod.etree.__version__),\n (\"bs4\", lambda mod: mod.__version__),\n (\"html5lib\", lambda mod: mod.__version__),\n (\"httplib2\", lambda mod: mod.__version__),\n (\"apiclient\", lambda mod: mod.__version__),\n (\"rpy2\", lambda mod: mod.__version__),\n (\"sqlalchemy\", lambda mod: mod.__version__),\n (\"pymysql\", lambda mod: mod.__version__),\n (\"psycopg2\", lambda mod: mod.__version__),\n ]\n\n deps_blob = list()\n for (modname, ver_f) in deps:\n try:\n try:\n mod = imp.load_module(modname, *imp.find_module(modname))\n except (ImportError):\n import importlib\n mod = importlib.import_module(modname)\n ver = ver_f(mod)\n deps_blob.append((modname, ver))\n except:\n deps_blob.append((modname, None))\n\n if (as_json):\n # 2.6-safe\n try:\n import json\n except:\n import simplejson as json\n\n j = dict(system=dict(sys_info), dependencies=dict(deps_blob))\n\n if as_json == True:\n print(j)\n else:\n with codecs.open(as_json, \"wb\", encoding='utf8') as f:\n json.dump(j, f, indent=2)\n\n else:\n\n print(\"\\nINSTALLED VERSIONS\")\n print(\"------------------\")\n\n for k, stat in sys_info:\n print(\"%s: %s\" % (k, stat))\n\n print(\"\")\n for k, stat in deps_blob:\n print(\"%s: %s\" % (k, stat))\n\n\ndef main():\n # optparse is 2.6-safe\n from optparse import OptionParser\n parser = OptionParser()\n parser.add_option(\"-j\", \"--json\", metavar=\"FILE\", nargs=1,\n help=\"Save output as JSON into file, pass in '-' to output to stdout\")\n\n (options, args) = parser.parse_args()\n\n if options.json == \"-\":\n options.json = True\n\n show_versions(as_json=options.json)\n\n return 0\n\nif __name__ == \"__main__\":\n sys.exit(main())\n", "path": "pandas/util/print_versions.py"}]}
| 1,923 | 143 |
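A triang mean near 6 on data centred at 10 is consistent with the window weights being applied without normalisation. The sketch below shows the effect for a 5-point triangular window; both the exact taps and the divide-by-length behaviour are assumptions about the rolling internals, not something shown in the files above:

```python
import numpy as np

data = np.full(5, 10.0)                        # one window of a series centred on 10
weights = np.array([1/3, 2/3, 1.0, 2/3, 1/3])  # scipy.signal.windows.triang(5)

print(np.dot(data, weights) / len(weights))    # 6.0  -- divide by the window length
print(np.dot(data, weights) / weights.sum())   # 10.0 -- divide by the weight total
```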
gh_patches_debug_22686
|
rasdani/github-patches
|
git_diff
|
WordPress__openverse-api-932
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
`piexif.dump` errors are not safely handled
## Sentry link
<!-- The public (aka "share") Sentry issue link. -->
https://sentry.io/share/issue/a80d52de7f89436586ed0250cd0a32d2/
## Description
<!-- Example: We are trying to access property foo of ImportantClass but the instance is null. -->
<!-- Include any additional information you may have, including potential remedies if any come to mind, and the general context of the code (what causes it to run in the app). -->
The call to `piexif.dump` should be wrapped in a `try/except` to prevent these errors in the watermark endpoint.
<!-- Mention whether this is a known regression, i.e., the feature used to work and now does not. -->
## Reproduction
<!-- Share the steps to reproduce the issue, if you were able to, OR a note sharing that you tried to reproduce but weren’t able to. -->
Visit https://api-dev.openverse.engineering/v1/images/a913fde1-d524-4059-bd4f-9bd687578cc3/watermark/ to see an example of this failure.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `api/catalog/api/utils/watermark.py`
Content:
```
1 import logging
2 import os
3 from enum import Flag, auto
4 from io import BytesIO
5 from textwrap import wrap
6
7 from django.conf import settings
8
9 import piexif
10 import requests
11 from PIL import Image, ImageDraw, ImageFont
12 from sentry_sdk import capture_exception
13
14
15 parent_logger = logging.getLogger(__name__)
16
17
18 BREAKPOINT_DIMENSION = 400 # 400px
19 MARGIN_RATIO = 0.04 # 4%
20 FONT_RATIO = 0.04 # 4%
21
22 FRAME_COLOR = "#fff" # White frame
23 TEXT_COLOR = "#000" # Black text
24 HEADERS = {
25 "User-Agent": settings.OUTBOUND_USER_AGENT_TEMPLATE.format(purpose="Watermark")
26 }
27
28
29 class Dimension(Flag):
30 """
31 This enum represents the two dimensions of an image
32 """
33
34 HEIGHT = auto()
35 WIDTH = auto()
36 BOTH = HEIGHT | WIDTH
37 NONE = 0
38
39
40 # Utils
41
42
43 def _smaller_dimension(width, height):
44 """
45 Determine which image dimensions are below the breakpoint dimensions
46 :param width: the width of the image
47 :param height: the height of the image
48 :return: True if the image is small, False otherwise
49 """
50
51 smaller_dimension = Dimension.NONE
52 if width < BREAKPOINT_DIMENSION:
53 smaller_dimension = smaller_dimension | Dimension.WIDTH
54 if height < BREAKPOINT_DIMENSION:
55 smaller_dimension = smaller_dimension | Dimension.HEIGHT
56 return smaller_dimension
57
58
59 def _get_font_path(monospace=False):
60 """
61 Return the path to the TTF font file
62 :param monospace: True for monospaced font, False for variable-width font
63 :return: the path to the TTF font file
64 """
65
66 font_name = "SourceCodePro-Bold.ttf" if monospace else "SourceSansPro-Bold.ttf"
67 font_path = os.path.join(os.path.dirname(__file__), "fonts", font_name)
68
69 return font_path
70
71
72 def _fit_in_width(text, font, max_width):
73 """
74 Break the given text so that it fits in the given space
75 :param text: the text to fit in the limited width
76 :param font: the font containing size and other info
77 :param max_width: the maximum width the text is allowed to take
78 :return: the fitted text
79 """
80
81 char_width, _ = font.getsize("x") # x has the closest to average width
82 max_chars = max_width // char_width
83
84 text = "\n".join(["\n".join(wrap(line, max_chars)) for line in text.split("\n")])
85
86 return text
87
88
89 # Framing
90
91
92 def _create_frame(dimensions):
93 """
94 Creates an frame with the given dimensions
95 :param dimensions: a tuple containing the width and height of the frame
96 :return: a white frame with the given dimensions
97 """
98
99 return Image.new("RGB", dimensions, FRAME_COLOR)
100
101
102 def _frame_image(image, frame, left_margin, top_margin):
103 """
104 Fix the image in the frame with the specified spacing
105 :param image: the image to frame
106 :param frame: the frame in which to fit the image
107 :param left_margin: the margin to the left of the image
108 :param top_margin: the margin to the top of the image
109 :return: the framed image
110 """
111
112 frame.paste(image, (left_margin, top_margin))
113 return frame
114
115
116 # Attribution
117
118
119 def _full_license(image_info):
120 """
121 Get the full license from the image info
122 :param image_info: the information about a particular image
123 :return: the full license text for the image
124 """
125
126 license_name = image_info["license"].upper()
127 license_version = image_info["license_version"].upper()
128 prefix = "" if license_name == "CC0" else "CC "
129
130 return f"{prefix}{license_name} {license_version}"
131
132
133 def _get_attribution_text(image_info):
134 """
135 Generate the attribution text from the image info
136 :param image_info: the info pertaining to the licensing of the image
137 :return: the attribution text
138 """
139
140 title = image_info["title"]
141 creator = image_info["creator"]
142 full_license = _full_license(image_info)
143
144 return f'"{title}" by {creator} is licensed under {full_license}.'
145
146
147 # Actions
148
149
150 def _open_image(url):
151 """
152 Read an image from a URL and convert it into a PIL Image object
153 :param url: the URL from where to read the image
154 :return: the PIL image object with the EXIF data
155 """
156 logger = parent_logger.getChild("_open_image")
157 try:
158 response = requests.get(url, headers=HEADERS)
159 img_bytes = BytesIO(response.content)
160 img = Image.open(img_bytes)
161 # Preserve EXIF metadata
162 if "exif" in img.info:
163 exif = piexif.load(img.info["exif"])
164 else:
165 exif = None
166 return img, exif
167 except requests.exceptions.RequestException as e:
168 capture_exception(e)
169 logger.error(f"Error loading image data: {e}")
170 return None, None
171
172
173 def _print_attribution_on_image(img, image_info):
174 """
175 Add a frame around the image and put the attribution text on the bottom
176 :param img: the image to frame and attribute
177 :param image_info: the information about a particular image
178 :return: return the framed and attributed image
179 """
180
181 width, height = img.size
182 smaller_dimension = _smaller_dimension(width, height)
183
184 if smaller_dimension is Dimension.NONE:
185 margin = round(MARGIN_RATIO * min(width, height))
186 font_size = round(FONT_RATIO * min(width, height))
187 new_width = width
188 else:
189 margin = round(MARGIN_RATIO * BREAKPOINT_DIMENSION)
190 font_size = round(FONT_RATIO * BREAKPOINT_DIMENSION)
191 new_width = (
192 BREAKPOINT_DIMENSION if Dimension.WIDTH in smaller_dimension else width
193 )
194
195 font = ImageFont.truetype(_get_font_path(), size=font_size)
196
197 text = _get_attribution_text(image_info)
198 text = _fit_in_width(text, font, new_width)
199 _, attribution_height = font.getsize_multiline(text)
200
201 frame_width = margin + new_width + margin
202 frame_height = margin + height + margin + attribution_height + margin
203 left_margin = (frame_width - width) // 2
204
205 frame = _create_frame(
206 (
207 frame_width,
208 frame_height,
209 )
210 )
211 _frame_image(img, frame, left_margin, margin)
212
213 draw = ImageDraw.Draw(frame)
214 text_position_x = margin
215 text_position_y = margin + height + margin
216 draw.text(
217 xy=(
218 text_position_x,
219 text_position_y,
220 ),
221 text=text,
222 font=font,
223 fill=TEXT_COLOR,
224 )
225
226 return frame
227
228
229 def watermark(image_url, info, draw_frame=True):
230 """
231 Returns a PIL Image with a watermark and embedded metadata.
232
233 :param image_url: The URL of the image.
234 :param info: A dictionary with keys title, creator, license, and
235 license_version
236 :param draw_frame: Whether to draw an attribution frame.
237 :returns: A PIL Image and its EXIF data, if included.
238 """
239
240 img, exif = _open_image(image_url)
241 if not draw_frame:
242 return img, exif
243 frame = _print_attribution_on_image(img, info)
244 return frame, exif
245
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/api/catalog/api/utils/watermark.py b/api/catalog/api/utils/watermark.py
--- a/api/catalog/api/utils/watermark.py
+++ b/api/catalog/api/utils/watermark.py
@@ -1,5 +1,6 @@
import logging
import os
+import struct
from enum import Flag, auto
from io import BytesIO
from textwrap import wrap
@@ -158,17 +159,18 @@
response = requests.get(url, headers=HEADERS)
img_bytes = BytesIO(response.content)
img = Image.open(img_bytes)
- # Preserve EXIF metadata
- if "exif" in img.info:
- exif = piexif.load(img.info["exif"])
- else:
- exif = None
- return img, exif
except requests.exceptions.RequestException as e:
capture_exception(e)
logger.error(f"Error loading image data: {e}")
return None, None
+ try:
+ # Preserve EXIF metadata
+ exif = piexif.load(img.info["exif"]) if "exif" in img.info else None
+ return img, exif
+ except struct.error:
+ return img, None
+
def _print_attribution_on_image(img, image_info):
"""
|
{"golden_diff": "diff --git a/api/catalog/api/utils/watermark.py b/api/catalog/api/utils/watermark.py\n--- a/api/catalog/api/utils/watermark.py\n+++ b/api/catalog/api/utils/watermark.py\n@@ -1,5 +1,6 @@\n import logging\n import os\n+import struct\n from enum import Flag, auto\n from io import BytesIO\n from textwrap import wrap\n@@ -158,17 +159,18 @@\n response = requests.get(url, headers=HEADERS)\n img_bytes = BytesIO(response.content)\n img = Image.open(img_bytes)\n- # Preserve EXIF metadata\n- if \"exif\" in img.info:\n- exif = piexif.load(img.info[\"exif\"])\n- else:\n- exif = None\n- return img, exif\n except requests.exceptions.RequestException as e:\n capture_exception(e)\n logger.error(f\"Error loading image data: {e}\")\n return None, None\n \n+ try:\n+ # Preserve EXIF metadata\n+ exif = piexif.load(img.info[\"exif\"]) if \"exif\" in img.info else None\n+ return img, exif\n+ except struct.error:\n+ return img, None\n+\n \n def _print_attribution_on_image(img, image_info):\n \"\"\"\n", "issue": "`piexif.dump` errors are not safely handled\n## Sentry link\r\n\r\n<!-- The public (aka \"share\") Sentry issue link. -->\r\nhttps://sentry.io/share/issue/a80d52de7f89436586ed0250cd0a32d2/\r\n\r\n## Description\r\n\r\n<!-- Example: We are trying to access property foo of ImportantClass but the instance is null. -->\r\n<!-- Include any additional information you may have, including potential remedies if any come to mind, and the general context of the code (what causes it to run in the app). -->\r\nThe call to `piexif.dump` should be wrapped in a `try/except` to prevent these errors in the watermark endpoint.\r\n\r\n<!-- Mention whether this is a known regression, i.e., the feature used to work and now does not. -->\r\n\r\n## Reproduction\r\n\r\n<!-- Share the steps to reproduce the issue, if you were able to, OR a note sharing that you tried to reproduce but weren\u2019t able to. 
-->\r\nVisit https://api-dev.openverse.engineering/v1/images/a913fde1-d524-4059-bd4f-9bd687578cc3/watermark/ to see an example of this failure.\n", "before_files": [{"content": "import logging\nimport os\nfrom enum import Flag, auto\nfrom io import BytesIO\nfrom textwrap import wrap\n\nfrom django.conf import settings\n\nimport piexif\nimport requests\nfrom PIL import Image, ImageDraw, ImageFont\nfrom sentry_sdk import capture_exception\n\n\nparent_logger = logging.getLogger(__name__)\n\n\nBREAKPOINT_DIMENSION = 400 # 400px\nMARGIN_RATIO = 0.04 # 4%\nFONT_RATIO = 0.04 # 4%\n\nFRAME_COLOR = \"#fff\" # White frame\nTEXT_COLOR = \"#000\" # Black text\nHEADERS = {\n \"User-Agent\": settings.OUTBOUND_USER_AGENT_TEMPLATE.format(purpose=\"Watermark\")\n}\n\n\nclass Dimension(Flag):\n \"\"\"\n This enum represents the two dimensions of an image\n \"\"\"\n\n HEIGHT = auto()\n WIDTH = auto()\n BOTH = HEIGHT | WIDTH\n NONE = 0\n\n\n# Utils\n\n\ndef _smaller_dimension(width, height):\n \"\"\"\n Determine which image dimensions are below the breakpoint dimensions\n :param width: the width of the image\n :param height: the height of the image\n :return: True if the image is small, False otherwise\n \"\"\"\n\n smaller_dimension = Dimension.NONE\n if width < BREAKPOINT_DIMENSION:\n smaller_dimension = smaller_dimension | Dimension.WIDTH\n if height < BREAKPOINT_DIMENSION:\n smaller_dimension = smaller_dimension | Dimension.HEIGHT\n return smaller_dimension\n\n\ndef _get_font_path(monospace=False):\n \"\"\"\n Return the path to the TTF font file\n :param monospace: True for monospaced font, False for variable-width font\n :return: the path to the TTF font file\n \"\"\"\n\n font_name = \"SourceCodePro-Bold.ttf\" if monospace else \"SourceSansPro-Bold.ttf\"\n font_path = os.path.join(os.path.dirname(__file__), \"fonts\", font_name)\n\n return font_path\n\n\ndef _fit_in_width(text, font, max_width):\n \"\"\"\n Break the given text so that it fits in the given space\n :param text: the text to fit in the limited width\n :param font: the font containing size and other info\n :param max_width: the maximum width the text is allowed to take\n :return: the fitted text\n \"\"\"\n\n char_width, _ = font.getsize(\"x\") # x has the closest to average width\n max_chars = max_width // char_width\n\n text = \"\\n\".join([\"\\n\".join(wrap(line, max_chars)) for line in text.split(\"\\n\")])\n\n return text\n\n\n# Framing\n\n\ndef _create_frame(dimensions):\n \"\"\"\n Creates an frame with the given dimensions\n :param dimensions: a tuple containing the width and height of the frame\n :return: a white frame with the given dimensions\n \"\"\"\n\n return Image.new(\"RGB\", dimensions, FRAME_COLOR)\n\n\ndef _frame_image(image, frame, left_margin, top_margin):\n \"\"\"\n Fix the image in the frame with the specified spacing\n :param image: the image to frame\n :param frame: the frame in which to fit the image\n :param left_margin: the margin to the left of the image\n :param top_margin: the margin to the top of the image\n :return: the framed image\n \"\"\"\n\n frame.paste(image, (left_margin, top_margin))\n return frame\n\n\n# Attribution\n\n\ndef _full_license(image_info):\n \"\"\"\n Get the full license from the image info\n :param image_info: the information about a particular image\n :return: the full license text for the image\n \"\"\"\n\n license_name = image_info[\"license\"].upper()\n license_version = image_info[\"license_version\"].upper()\n prefix = \"\" if license_name == \"CC0\" else \"CC \"\n\n return 
f\"{prefix}{license_name} {license_version}\"\n\n\ndef _get_attribution_text(image_info):\n \"\"\"\n Generate the attribution text from the image info\n :param image_info: the info pertaining to the licensing of the image\n :return: the attribution text\n \"\"\"\n\n title = image_info[\"title\"]\n creator = image_info[\"creator\"]\n full_license = _full_license(image_info)\n\n return f'\"{title}\" by {creator} is licensed under {full_license}.'\n\n\n# Actions\n\n\ndef _open_image(url):\n \"\"\"\n Read an image from a URL and convert it into a PIL Image object\n :param url: the URL from where to read the image\n :return: the PIL image object with the EXIF data\n \"\"\"\n logger = parent_logger.getChild(\"_open_image\")\n try:\n response = requests.get(url, headers=HEADERS)\n img_bytes = BytesIO(response.content)\n img = Image.open(img_bytes)\n # Preserve EXIF metadata\n if \"exif\" in img.info:\n exif = piexif.load(img.info[\"exif\"])\n else:\n exif = None\n return img, exif\n except requests.exceptions.RequestException as e:\n capture_exception(e)\n logger.error(f\"Error loading image data: {e}\")\n return None, None\n\n\ndef _print_attribution_on_image(img, image_info):\n \"\"\"\n Add a frame around the image and put the attribution text on the bottom\n :param img: the image to frame and attribute\n :param image_info: the information about a particular image\n :return: return the framed and attributed image\n \"\"\"\n\n width, height = img.size\n smaller_dimension = _smaller_dimension(width, height)\n\n if smaller_dimension is Dimension.NONE:\n margin = round(MARGIN_RATIO * min(width, height))\n font_size = round(FONT_RATIO * min(width, height))\n new_width = width\n else:\n margin = round(MARGIN_RATIO * BREAKPOINT_DIMENSION)\n font_size = round(FONT_RATIO * BREAKPOINT_DIMENSION)\n new_width = (\n BREAKPOINT_DIMENSION if Dimension.WIDTH in smaller_dimension else width\n )\n\n font = ImageFont.truetype(_get_font_path(), size=font_size)\n\n text = _get_attribution_text(image_info)\n text = _fit_in_width(text, font, new_width)\n _, attribution_height = font.getsize_multiline(text)\n\n frame_width = margin + new_width + margin\n frame_height = margin + height + margin + attribution_height + margin\n left_margin = (frame_width - width) // 2\n\n frame = _create_frame(\n (\n frame_width,\n frame_height,\n )\n )\n _frame_image(img, frame, left_margin, margin)\n\n draw = ImageDraw.Draw(frame)\n text_position_x = margin\n text_position_y = margin + height + margin\n draw.text(\n xy=(\n text_position_x,\n text_position_y,\n ),\n text=text,\n font=font,\n fill=TEXT_COLOR,\n )\n\n return frame\n\n\ndef watermark(image_url, info, draw_frame=True):\n \"\"\"\n Returns a PIL Image with a watermark and embedded metadata.\n\n :param image_url: The URL of the image.\n :param info: A dictionary with keys title, creator, license, and\n license_version\n :param draw_frame: Whether to draw an attribution frame.\n :returns: A PIL Image and its EXIF data, if included.\n \"\"\"\n\n img, exif = _open_image(image_url)\n if not draw_frame:\n return img, exif\n frame = _print_attribution_on_image(img, info)\n return frame, exif\n", "path": "api/catalog/api/utils/watermark.py"}], "after_files": [{"content": "import logging\nimport os\nimport struct\nfrom enum import Flag, auto\nfrom io import BytesIO\nfrom textwrap import wrap\n\nfrom django.conf import settings\n\nimport piexif\nimport requests\nfrom PIL import Image, ImageDraw, ImageFont\nfrom sentry_sdk import capture_exception\n\n\nparent_logger = 
logging.getLogger(__name__)\n\n\nBREAKPOINT_DIMENSION = 400 # 400px\nMARGIN_RATIO = 0.04 # 4%\nFONT_RATIO = 0.04 # 4%\n\nFRAME_COLOR = \"#fff\" # White frame\nTEXT_COLOR = \"#000\" # Black text\nHEADERS = {\n \"User-Agent\": settings.OUTBOUND_USER_AGENT_TEMPLATE.format(purpose=\"Watermark\")\n}\n\n\nclass Dimension(Flag):\n \"\"\"\n This enum represents the two dimensions of an image\n \"\"\"\n\n HEIGHT = auto()\n WIDTH = auto()\n BOTH = HEIGHT | WIDTH\n NONE = 0\n\n\n# Utils\n\n\ndef _smaller_dimension(width, height):\n \"\"\"\n Determine which image dimensions are below the breakpoint dimensions\n :param width: the width of the image\n :param height: the height of the image\n :return: True if the image is small, False otherwise\n \"\"\"\n\n smaller_dimension = Dimension.NONE\n if width < BREAKPOINT_DIMENSION:\n smaller_dimension = smaller_dimension | Dimension.WIDTH\n if height < BREAKPOINT_DIMENSION:\n smaller_dimension = smaller_dimension | Dimension.HEIGHT\n return smaller_dimension\n\n\ndef _get_font_path(monospace=False):\n \"\"\"\n Return the path to the TTF font file\n :param monospace: True for monospaced font, False for variable-width font\n :return: the path to the TTF font file\n \"\"\"\n\n font_name = \"SourceCodePro-Bold.ttf\" if monospace else \"SourceSansPro-Bold.ttf\"\n font_path = os.path.join(os.path.dirname(__file__), \"fonts\", font_name)\n\n return font_path\n\n\ndef _fit_in_width(text, font, max_width):\n \"\"\"\n Break the given text so that it fits in the given space\n :param text: the text to fit in the limited width\n :param font: the font containing size and other info\n :param max_width: the maximum width the text is allowed to take\n :return: the fitted text\n \"\"\"\n\n char_width, _ = font.getsize(\"x\") # x has the closest to average width\n max_chars = max_width // char_width\n\n text = \"\\n\".join([\"\\n\".join(wrap(line, max_chars)) for line in text.split(\"\\n\")])\n\n return text\n\n\n# Framing\n\n\ndef _create_frame(dimensions):\n \"\"\"\n Creates an frame with the given dimensions\n :param dimensions: a tuple containing the width and height of the frame\n :return: a white frame with the given dimensions\n \"\"\"\n\n return Image.new(\"RGB\", dimensions, FRAME_COLOR)\n\n\ndef _frame_image(image, frame, left_margin, top_margin):\n \"\"\"\n Fix the image in the frame with the specified spacing\n :param image: the image to frame\n :param frame: the frame in which to fit the image\n :param left_margin: the margin to the left of the image\n :param top_margin: the margin to the top of the image\n :return: the framed image\n \"\"\"\n\n frame.paste(image, (left_margin, top_margin))\n return frame\n\n\n# Attribution\n\n\ndef _full_license(image_info):\n \"\"\"\n Get the full license from the image info\n :param image_info: the information about a particular image\n :return: the full license text for the image\n \"\"\"\n\n license_name = image_info[\"license\"].upper()\n license_version = image_info[\"license_version\"].upper()\n prefix = \"\" if license_name == \"CC0\" else \"CC \"\n\n return f\"{prefix}{license_name} {license_version}\"\n\n\ndef _get_attribution_text(image_info):\n \"\"\"\n Generate the attribution text from the image info\n :param image_info: the info pertaining to the licensing of the image\n :return: the attribution text\n \"\"\"\n\n title = image_info[\"title\"]\n creator = image_info[\"creator\"]\n full_license = _full_license(image_info)\n\n return f'\"{title}\" by {creator} is licensed under {full_license}.'\n\n\n# Actions\n\n\ndef 
_open_image(url):\n \"\"\"\n Read an image from a URL and convert it into a PIL Image object\n :param url: the URL from where to read the image\n :return: the PIL image object with the EXIF data\n \"\"\"\n logger = parent_logger.getChild(\"_open_image\")\n try:\n response = requests.get(url, headers=HEADERS)\n img_bytes = BytesIO(response.content)\n img = Image.open(img_bytes)\n except requests.exceptions.RequestException as e:\n capture_exception(e)\n logger.error(f\"Error loading image data: {e}\")\n return None, None\n\n try:\n # Preserve EXIF metadata\n exif = piexif.load(img.info[\"exif\"]) if \"exif\" in img.info else None\n return img, exif\n except struct.error:\n return img, None\n\n\ndef _print_attribution_on_image(img, image_info):\n \"\"\"\n Add a frame around the image and put the attribution text on the bottom\n :param img: the image to frame and attribute\n :param image_info: the information about a particular image\n :return: return the framed and attributed image\n \"\"\"\n\n width, height = img.size\n smaller_dimension = _smaller_dimension(width, height)\n\n if smaller_dimension is Dimension.NONE:\n margin = round(MARGIN_RATIO * min(width, height))\n font_size = round(FONT_RATIO * min(width, height))\n new_width = width\n else:\n margin = round(MARGIN_RATIO * BREAKPOINT_DIMENSION)\n font_size = round(FONT_RATIO * BREAKPOINT_DIMENSION)\n new_width = (\n BREAKPOINT_DIMENSION if Dimension.WIDTH in smaller_dimension else width\n )\n\n font = ImageFont.truetype(_get_font_path(), size=font_size)\n\n text = _get_attribution_text(image_info)\n text = _fit_in_width(text, font, new_width)\n _, attribution_height = font.getsize_multiline(text)\n\n frame_width = margin + new_width + margin\n frame_height = margin + height + margin + attribution_height + margin\n left_margin = (frame_width - width) // 2\n\n frame = _create_frame(\n (\n frame_width,\n frame_height,\n )\n )\n _frame_image(img, frame, left_margin, margin)\n\n draw = ImageDraw.Draw(frame)\n text_position_x = margin\n text_position_y = margin + height + margin\n draw.text(\n xy=(\n text_position_x,\n text_position_y,\n ),\n text=text,\n font=font,\n fill=TEXT_COLOR,\n )\n\n return frame\n\n\ndef watermark(image_url, info, draw_frame=True):\n \"\"\"\n Returns a PIL Image with a watermark and embedded metadata.\n\n :param image_url: The URL of the image.\n :param info: A dictionary with keys title, creator, license, and\n license_version\n :param draw_frame: Whether to draw an attribution frame.\n :returns: A PIL Image and its EXIF data, if included.\n \"\"\"\n\n img, exif = _open_image(image_url)\n if not draw_frame:\n return img, exif\n frame = _print_attribution_on_image(img, info)\n return frame, exif\n", "path": "api/catalog/api/utils/watermark.py"}]}
| 2,834 | 289 |
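
The patch in the record above isolates EXIF parsing from image fetching, so a malformed EXIF segment (which makes `piexif.load` raise `struct.error`) no longer discards an otherwise usable image. A self-contained sketch of that pattern follows; the function name and timeout are illustrative rather than part of the catalog codebase, and it assumes `requests`, `Pillow`, and `piexif` are installed.

```python
import struct
from io import BytesIO

import piexif
import requests
from PIL import Image


def fetch_image_with_exif(url, timeout=10):
    """Return (image, exif) where a bad EXIF segment costs only the metadata."""
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()
        img = Image.open(BytesIO(response.content))
    except requests.exceptions.RequestException:
        return None, None  # network-level failure: no image at all

    try:
        # piexif raises struct.error when the EXIF bytes are malformed
        exif = piexif.load(img.info["exif"]) if "exif" in img.info else None
    except struct.error:
        exif = None  # keep the image, drop the unreadable metadata
    return img, exif
```
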
gh_patches_debug_37926 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-5842
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Samsonite spider finds dealers, not official stores
This spider is wrong, e.g., the stores in Norway are not official Samsonite stores but dealers carrying the Samsonite brand
E.g., this is Chillout Travel Store, not a Samsonite store
https://www.alltheplaces.xyz/map/#15.79/59.920398/10.757257
The website does list official stores and dealers separately, so it should be possible to import the right type?
https://www.samsonite.no/samsonite-store/?search=dealer&city=&country=no&lat=59.920469259204786&lng=10.755597088646583&radius=20
_Originally posted by @eisams in https://github.com/alltheplaces/alltheplaces/issues/4385#issuecomment-1586255246_
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/samsonite_eu.py`
Content:
```
1 import scrapy
2 import xmltodict
3
4 from locations.dict_parser import DictParser
5
6
7 class SamsoniteEuSpider(scrapy.Spider):
8 name = "samsonite_eu"
9 item_attributes = {
10 "brand": "Samsonite",
11 "brand_wikidata": "Q1203426",
12 }
13 allowed_domains = ["samsonite.com"]
14
15 def start_requests(self):
16 country_eu = [
17 "AL",
18 "CZ",
19 "DE",
20 "DK",
21 "CY",
22 "AT",
23 "BE",
24 "BG",
25 "CH",
26 "EE",
27 "EL",
28 "ES",
29 "FI",
30 "FR",
31 "HR",
32 "HU",
33 "IE",
34 "IS",
35 "IT",
36 "LT",
37 "LU",
38 "NL",
39 "NO",
40 "LV",
41 "ME",
42 "MT",
43 "MK",
44 "LI",
45 "PL",
46 "SI",
47 "SK",
48 "TR",
49 "UK",
50 "RS",
51 "SE",
52 "PT",
53 "RO",
54 ]
55 template = "https://storelocator.samsonite.eu/data-exchange/getDealerLocatorMapV2_Radius.aspx?s=sams&country={}&search=dealer&lat=48.85799300000001&lng=2.381153&radius=100000"
56 for country in country_eu:
57 yield scrapy.Request(url=template.format(country), callback=self.parse)
58
59 def parse(self, response):
60 data = xmltodict.parse(response.text)
61 if data.get("dealers"):
62 stores = data.get("dealers", {}).get("dealer")
63 stores = stores if type(stores) == list else [stores]
64 for store in stores:
65 item = DictParser.parse(store)
66 item["ref"] = store.get("fld_Deal_Id")
67 item["street_address"] = store.get("fld_Deal_Address1")
68 item["city"] = store.get("fld_Deal_City1")
69 item["postcode"] = store.get("fld_Deal_Zip")
70 item["country"] = store.get("fld_Coun_Name")
71 item["phone"] = store.get("fld_Deal_Phone")
72 item["email"] = store.get("fld_Deal_Email")
73
74 yield item
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/samsonite_eu.py b/locations/spiders/samsonite_eu.py
--- a/locations/spiders/samsonite_eu.py
+++ b/locations/spiders/samsonite_eu.py
@@ -1,15 +1,13 @@
import scrapy
import xmltodict
-from locations.dict_parser import DictParser
+from locations.items import Feature, add_social_media
class SamsoniteEuSpider(scrapy.Spider):
name = "samsonite_eu"
- item_attributes = {
- "brand": "Samsonite",
- "brand_wikidata": "Q1203426",
- }
+ CHIC_ACCENT = {"brand": "Chic Accent"}
+ SAMSONITE = {"brand": "Samsonite", "brand_wikidata": "Q1203426"}
allowed_domains = ["samsonite.com"]
def start_requests(self):
@@ -51,6 +49,7 @@
"SE",
"PT",
"RO",
+ "GB",
]
template = "https://storelocator.samsonite.eu/data-exchange/getDealerLocatorMapV2_Radius.aspx?s=sams&country={}&search=dealer&lat=48.85799300000001&lng=2.381153&radius=100000"
for country in country_eu:
@@ -62,13 +61,31 @@
stores = data.get("dealers", {}).get("dealer")
stores = stores if type(stores) == list else [stores]
for store in stores:
- item = DictParser.parse(store)
+ if store["fld_Deal_DeCl_ID"] != "9":
+ continue
+ item = Feature()
+ item["lat"] = store["Latitude"]
+ item["lon"] = store["Longitude"]
item["ref"] = store.get("fld_Deal_Id")
item["street_address"] = store.get("fld_Deal_Address1")
item["city"] = store.get("fld_Deal_City1")
item["postcode"] = store.get("fld_Deal_Zip")
item["country"] = store.get("fld_Coun_Name")
- item["phone"] = store.get("fld_Deal_Phone")
- item["email"] = store.get("fld_Deal_Email")
+ item["email"] = store.get("fld_Deal_Email") or ""
+ item["website"] = store["fld_Deal_DetailPageUrl"]
+
+ if "chicaccent.com" in item["email"]:
+ item.update(self.CHIC_ACCENT)
+ else:
+ item.update(self.SAMSONITE)
+
+ if phone := store.get("fld_Deal_Phone"):
+ phone = store["fld_Deal_Prefix"] + phone.lower()
+
+ if "whatsapp" in phone:
+ phone, whats_app = phone.split("whatsapp")
+ add_social_media(item, "WhatsApp", whats_app.strip(" :"))
+
+ item["phone"] = phone
yield item
|
{"golden_diff": "diff --git a/locations/spiders/samsonite_eu.py b/locations/spiders/samsonite_eu.py\n--- a/locations/spiders/samsonite_eu.py\n+++ b/locations/spiders/samsonite_eu.py\n@@ -1,15 +1,13 @@\n import scrapy\n import xmltodict\n \n-from locations.dict_parser import DictParser\n+from locations.items import Feature, add_social_media\n \n \n class SamsoniteEuSpider(scrapy.Spider):\n name = \"samsonite_eu\"\n- item_attributes = {\n- \"brand\": \"Samsonite\",\n- \"brand_wikidata\": \"Q1203426\",\n- }\n+ CHIC_ACCENT = {\"brand\": \"Chic Accent\"}\n+ SAMSONITE = {\"brand\": \"Samsonite\", \"brand_wikidata\": \"Q1203426\"}\n allowed_domains = [\"samsonite.com\"]\n \n def start_requests(self):\n@@ -51,6 +49,7 @@\n \"SE\",\n \"PT\",\n \"RO\",\n+ \"GB\",\n ]\n template = \"https://storelocator.samsonite.eu/data-exchange/getDealerLocatorMapV2_Radius.aspx?s=sams&country={}&search=dealer&lat=48.85799300000001&lng=2.381153&radius=100000\"\n for country in country_eu:\n@@ -62,13 +61,31 @@\n stores = data.get(\"dealers\", {}).get(\"dealer\")\n stores = stores if type(stores) == list else [stores]\n for store in stores:\n- item = DictParser.parse(store)\n+ if store[\"fld_Deal_DeCl_ID\"] != \"9\":\n+ continue\n+ item = Feature()\n+ item[\"lat\"] = store[\"Latitude\"]\n+ item[\"lon\"] = store[\"Longitude\"]\n item[\"ref\"] = store.get(\"fld_Deal_Id\")\n item[\"street_address\"] = store.get(\"fld_Deal_Address1\")\n item[\"city\"] = store.get(\"fld_Deal_City1\")\n item[\"postcode\"] = store.get(\"fld_Deal_Zip\")\n item[\"country\"] = store.get(\"fld_Coun_Name\")\n- item[\"phone\"] = store.get(\"fld_Deal_Phone\")\n- item[\"email\"] = store.get(\"fld_Deal_Email\")\n+ item[\"email\"] = store.get(\"fld_Deal_Email\") or \"\"\n+ item[\"website\"] = store[\"fld_Deal_DetailPageUrl\"]\n+\n+ if \"chicaccent.com\" in item[\"email\"]:\n+ item.update(self.CHIC_ACCENT)\n+ else:\n+ item.update(self.SAMSONITE)\n+\n+ if phone := store.get(\"fld_Deal_Phone\"):\n+ phone = store[\"fld_Deal_Prefix\"] + phone.lower()\n+\n+ if \"whatsapp\" in phone:\n+ phone, whats_app = phone.split(\"whatsapp\")\n+ add_social_media(item, \"WhatsApp\", whats_app.strip(\" :\"))\n+\n+ item[\"phone\"] = phone\n \n yield item\n", "issue": "Samsonite spider finds dealers, not official stores\nThis spider is wrong, e.g., the stores in Norway are not official Samsonite stores but dealers carrying the Samsonite brand\r\n\r\nE.g., this is Chillout Travel Store, not a Samsonite store\r\nhttps://www.alltheplaces.xyz/map/#15.79/59.920398/10.757257\r\n\r\nThe website does list official stores and dealers separately, so it should be possible to import the right type?\r\nhttps://www.samsonite.no/samsonite-store/?search=dealer&city=&country=no&lat=59.920469259204786&lng=10.755597088646583&radius=20\r\n\r\n_Originally posted by @eisams in https://github.com/alltheplaces/alltheplaces/issues/4385#issuecomment-1586255246_\r\n \n", "before_files": [{"content": "import scrapy\nimport xmltodict\n\nfrom locations.dict_parser import DictParser\n\n\nclass SamsoniteEuSpider(scrapy.Spider):\n name = \"samsonite_eu\"\n item_attributes = {\n \"brand\": \"Samsonite\",\n \"brand_wikidata\": \"Q1203426\",\n }\n allowed_domains = [\"samsonite.com\"]\n\n def start_requests(self):\n country_eu = [\n \"AL\",\n \"CZ\",\n \"DE\",\n \"DK\",\n \"CY\",\n \"AT\",\n \"BE\",\n \"BG\",\n \"CH\",\n \"EE\",\n \"EL\",\n \"ES\",\n \"FI\",\n \"FR\",\n \"HR\",\n \"HU\",\n \"IE\",\n \"IS\",\n \"IT\",\n \"LT\",\n \"LU\",\n \"NL\",\n \"NO\",\n \"LV\",\n \"ME\",\n \"MT\",\n \"MK\",\n \"LI\",\n 
\"PL\",\n \"SI\",\n \"SK\",\n \"TR\",\n \"UK\",\n \"RS\",\n \"SE\",\n \"PT\",\n \"RO\",\n ]\n template = \"https://storelocator.samsonite.eu/data-exchange/getDealerLocatorMapV2_Radius.aspx?s=sams&country={}&search=dealer&lat=48.85799300000001&lng=2.381153&radius=100000\"\n for country in country_eu:\n yield scrapy.Request(url=template.format(country), callback=self.parse)\n\n def parse(self, response):\n data = xmltodict.parse(response.text)\n if data.get(\"dealers\"):\n stores = data.get(\"dealers\", {}).get(\"dealer\")\n stores = stores if type(stores) == list else [stores]\n for store in stores:\n item = DictParser.parse(store)\n item[\"ref\"] = store.get(\"fld_Deal_Id\")\n item[\"street_address\"] = store.get(\"fld_Deal_Address1\")\n item[\"city\"] = store.get(\"fld_Deal_City1\")\n item[\"postcode\"] = store.get(\"fld_Deal_Zip\")\n item[\"country\"] = store.get(\"fld_Coun_Name\")\n item[\"phone\"] = store.get(\"fld_Deal_Phone\")\n item[\"email\"] = store.get(\"fld_Deal_Email\")\n\n yield item\n", "path": "locations/spiders/samsonite_eu.py"}], "after_files": [{"content": "import scrapy\nimport xmltodict\n\nfrom locations.items import Feature, add_social_media\n\n\nclass SamsoniteEuSpider(scrapy.Spider):\n name = \"samsonite_eu\"\n CHIC_ACCENT = {\"brand\": \"Chic Accent\"}\n SAMSONITE = {\"brand\": \"Samsonite\", \"brand_wikidata\": \"Q1203426\"}\n allowed_domains = [\"samsonite.com\"]\n\n def start_requests(self):\n country_eu = [\n \"AL\",\n \"CZ\",\n \"DE\",\n \"DK\",\n \"CY\",\n \"AT\",\n \"BE\",\n \"BG\",\n \"CH\",\n \"EE\",\n \"EL\",\n \"ES\",\n \"FI\",\n \"FR\",\n \"HR\",\n \"HU\",\n \"IE\",\n \"IS\",\n \"IT\",\n \"LT\",\n \"LU\",\n \"NL\",\n \"NO\",\n \"LV\",\n \"ME\",\n \"MT\",\n \"MK\",\n \"LI\",\n \"PL\",\n \"SI\",\n \"SK\",\n \"TR\",\n \"UK\",\n \"RS\",\n \"SE\",\n \"PT\",\n \"RO\",\n \"GB\",\n ]\n template = \"https://storelocator.samsonite.eu/data-exchange/getDealerLocatorMapV2_Radius.aspx?s=sams&country={}&search=dealer&lat=48.85799300000001&lng=2.381153&radius=100000\"\n for country in country_eu:\n yield scrapy.Request(url=template.format(country), callback=self.parse)\n\n def parse(self, response):\n data = xmltodict.parse(response.text)\n if data.get(\"dealers\"):\n stores = data.get(\"dealers\", {}).get(\"dealer\")\n stores = stores if type(stores) == list else [stores]\n for store in stores:\n if store[\"fld_Deal_DeCl_ID\"] != \"9\":\n continue\n item = Feature()\n item[\"lat\"] = store[\"Latitude\"]\n item[\"lon\"] = store[\"Longitude\"]\n item[\"ref\"] = store.get(\"fld_Deal_Id\")\n item[\"street_address\"] = store.get(\"fld_Deal_Address1\")\n item[\"city\"] = store.get(\"fld_Deal_City1\")\n item[\"postcode\"] = store.get(\"fld_Deal_Zip\")\n item[\"country\"] = store.get(\"fld_Coun_Name\")\n item[\"email\"] = store.get(\"fld_Deal_Email\") or \"\"\n item[\"website\"] = store[\"fld_Deal_DetailPageUrl\"]\n\n if \"chicaccent.com\" in item[\"email\"]:\n item.update(self.CHIC_ACCENT)\n else:\n item.update(self.SAMSONITE)\n\n if phone := store.get(\"fld_Deal_Phone\"):\n phone = store[\"fld_Deal_Prefix\"] + phone.lower()\n\n if \"whatsapp\" in phone:\n phone, whats_app = phone.split(\"whatsapp\")\n add_social_media(item, \"WhatsApp\", whats_app.strip(\" :\"))\n\n item[\"phone\"] = phone\n\n yield item\n", "path": "locations/spiders/samsonite_eu.py"}]}
| 1,149 | 699 |
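
The decisive line in the fix above is the classification check on `fld_Deal_DeCl_ID`, which drops plain dealers and keeps official stores. A minimal, framework-free sketch of that filtering step is given below; the field names and the value "9" are taken from the golden diff rather than from any separate documentation of the Samsonite feed.

```python
import xmltodict


def official_stores(xml_text, official_class="9"):
    """Yield only dealers whose classification marks them as official stores."""
    data = xmltodict.parse(xml_text)
    dealers = (data.get("dealers") or {}).get("dealer") or []
    if not isinstance(dealers, list):
        dealers = [dealers]  # a single <dealer> element parses to a dict
    for dealer in dealers:
        if dealer.get("fld_Deal_DeCl_ID") == official_class:
            yield dealer
```
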
gh_patches_debug_25598 | rasdani/github-patches | git_diff | alltheplaces__alltheplaces-3459
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Spider public_storage is broken
During the global build at 2021-08-04-14-42-45, spider **public_storage** failed with **834 features** and **1879 errors**.
Here's [the log](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/logs/public_storage.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/public_storage.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/public_storage.geojson))
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `locations/spiders/public_storage.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import scrapy
3 import json
4
5 from locations.items import GeojsonPointItem
6 from locations.hours import OpeningHours
7
8
9 class PublicStorageSpider(scrapy.Spider):
10 name = "public_storage"
11 item_attributes = { 'brand': "Public Storage" }
12 allowed_domains = ["www.publicstorage.com"]
13 start_urls = (
14 'https://www.publicstorage.com/sitemap_plp.xml',
15 )
16
17 def parse(self, response):
18 response.selector.remove_namespaces()
19 city_urls = response.xpath('//url/loc/text()').extract()
20 for path in city_urls:
21 yield scrapy.Request(
22 path.strip(),
23 callback=self.parse_store,
24 )
25
26 def parse_hours(self, hours):
27 opening_hours = OpeningHours()
28
29 for hour in hours:
30 for day in hour['dayOfWeek']:
31 opening_hours.add_range(
32 day=day[:2],
33 open_time=hour["opens"],
34 close_time=hour["closes"],
35 )
36
37 return opening_hours.as_opening_hours()
38
39 def parse_store(self, response):
40 data = json.loads(response.xpath('//script[@type="application/ld+json"]/text()').extract_first())
41 data = data['@graph'][0]
42
43 properties = {
44 "ref": data['@id'],
45 "opening_hours": self.parse_hours(data['openingHoursSpecification']),
46 "addr_full": data['address']['streetAddress'],
47 "city": data['address']['addressLocality'],
48 "state": data['address']['addressRegion'],
49 "postcode": data['address']['postalCode'],
50 "phone": data['telephone'],
51 "lat": data['geo']['latitude'],
52 "lon": data['geo']['longitude'],
53 }
54
55 yield GeojsonPointItem(**properties)
56
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/locations/spiders/public_storage.py b/locations/spiders/public_storage.py
--- a/locations/spiders/public_storage.py
+++ b/locations/spiders/public_storage.py
@@ -20,9 +20,13 @@
for path in city_urls:
yield scrapy.Request(
path.strip(),
- callback=self.parse_store,
+ callback=self.load_store,
)
+ def load_store(self, response):
+ ldjson = response.xpath('//link[@type="application/ld+json"]/@href').get()
+ yield scrapy.Request(response.urljoin(ldjson), callback=self.parse_store)
+
def parse_hours(self, hours):
opening_hours = OpeningHours()
@@ -37,11 +41,11 @@
return opening_hours.as_opening_hours()
def parse_store(self, response):
- data = json.loads(response.xpath('//script[@type="application/ld+json"]/text()').extract_first())
- data = data['@graph'][0]
+ data = response.json()['@graph'][0]
properties = {
"ref": data['@id'],
+ "website": data['url'],
"opening_hours": self.parse_hours(data['openingHoursSpecification']),
"addr_full": data['address']['streetAddress'],
"city": data['address']['addressLocality'],
|
{"golden_diff": "diff --git a/locations/spiders/public_storage.py b/locations/spiders/public_storage.py\n--- a/locations/spiders/public_storage.py\n+++ b/locations/spiders/public_storage.py\n@@ -20,9 +20,13 @@\n for path in city_urls:\n yield scrapy.Request(\n path.strip(),\n- callback=self.parse_store,\n+ callback=self.load_store,\n )\n \n+ def load_store(self, response):\n+ ldjson = response.xpath('//link[@type=\"application/ld+json\"]/@href').get()\n+ yield scrapy.Request(response.urljoin(ldjson), callback=self.parse_store)\n+\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n \n@@ -37,11 +41,11 @@\n return opening_hours.as_opening_hours()\n \n def parse_store(self, response):\n- data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first())\n- data = data['@graph'][0]\n+ data = response.json()['@graph'][0]\n \n properties = {\n \"ref\": data['@id'],\n+ \"website\": data['url'],\n \"opening_hours\": self.parse_hours(data['openingHoursSpecification']),\n \"addr_full\": data['address']['streetAddress'],\n \"city\": data['address']['addressLocality'],\n", "issue": "Spider public_storage is broken\nDuring the global build at 2021-08-04-14-42-45, spider **public_storage** failed with **834 features** and **1879 errors**.\n\nHere's [the log](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/logs/public_storage.txt) and [the output](https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/public_storage.geojson) ([on a map](https://data.alltheplaces.xyz/map.html?show=https://data.alltheplaces.xyz/runs/2021-08-04-14-42-45/output/public_storage.geojson))\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass PublicStorageSpider(scrapy.Spider):\n name = \"public_storage\"\n item_attributes = { 'brand': \"Public Storage\" }\n allowed_domains = [\"www.publicstorage.com\"]\n start_urls = (\n 'https://www.publicstorage.com/sitemap_plp.xml',\n )\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n yield scrapy.Request(\n path.strip(),\n callback=self.parse_store,\n )\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n for day in hour['dayOfWeek']:\n opening_hours.add_range(\n day=day[:2],\n open_time=hour[\"opens\"],\n close_time=hour[\"closes\"],\n )\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n data = json.loads(response.xpath('//script[@type=\"application/ld+json\"]/text()').extract_first())\n data = data['@graph'][0]\n\n properties = {\n \"ref\": data['@id'],\n \"opening_hours\": self.parse_hours(data['openingHoursSpecification']),\n \"addr_full\": data['address']['streetAddress'],\n \"city\": data['address']['addressLocality'],\n \"state\": data['address']['addressRegion'],\n \"postcode\": data['address']['postalCode'],\n \"phone\": data['telephone'],\n \"lat\": data['geo']['latitude'],\n \"lon\": data['geo']['longitude'],\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/public_storage.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport scrapy\nimport json\n\nfrom locations.items import GeojsonPointItem\nfrom locations.hours import OpeningHours\n\n\nclass PublicStorageSpider(scrapy.Spider):\n name = \"public_storage\"\n item_attributes = { 'brand': \"Public Storage\" }\n 
allowed_domains = [\"www.publicstorage.com\"]\n start_urls = (\n 'https://www.publicstorage.com/sitemap_plp.xml',\n )\n\n def parse(self, response):\n response.selector.remove_namespaces()\n city_urls = response.xpath('//url/loc/text()').extract()\n for path in city_urls:\n yield scrapy.Request(\n path.strip(),\n callback=self.load_store,\n )\n\n def load_store(self, response):\n ldjson = response.xpath('//link[@type=\"application/ld+json\"]/@href').get()\n yield scrapy.Request(response.urljoin(ldjson), callback=self.parse_store)\n\n def parse_hours(self, hours):\n opening_hours = OpeningHours()\n\n for hour in hours:\n for day in hour['dayOfWeek']:\n opening_hours.add_range(\n day=day[:2],\n open_time=hour[\"opens\"],\n close_time=hour[\"closes\"],\n )\n\n return opening_hours.as_opening_hours()\n\n def parse_store(self, response):\n data = response.json()['@graph'][0]\n\n properties = {\n \"ref\": data['@id'],\n \"website\": data['url'],\n \"opening_hours\": self.parse_hours(data['openingHoursSpecification']),\n \"addr_full\": data['address']['streetAddress'],\n \"city\": data['address']['addressLocality'],\n \"state\": data['address']['addressRegion'],\n \"postcode\": data['address']['postalCode'],\n \"phone\": data['telephone'],\n \"lat\": data['geo']['latitude'],\n \"lon\": data['geo']['longitude'],\n }\n\n yield GeojsonPointItem(**properties)\n", "path": "locations/spiders/public_storage.py"}]}
| 926 | 292 |
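
The fix above works because the store pages no longer embed the JSON-LD block inline; they only reference it through a `<link type="application/ld+json">` tag, so a second request is needed for the linked document. The sketch below shows the same two-step pattern outside Scrapy, using `requests` and `parsel` purely for illustration.

```python
from urllib.parse import urljoin

import requests
from parsel import Selector


def fetch_store_jsonld(store_url, timeout=10):
    """Follow the JSON-LD <link> on a store page and return the first @graph node."""
    page = requests.get(store_url, timeout=timeout)
    page.raise_for_status()
    href = Selector(text=page.text).xpath(
        '//link[@type="application/ld+json"]/@href'
    ).get()
    if href is None:
        return None  # no linked structured data on this page
    ld = requests.get(urljoin(store_url, href), timeout=timeout)
    ld.raise_for_status()
    return ld.json()["@graph"][0]
```
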
gh_patches_debug_22879 | rasdani/github-patches | git_diff | techmatters__terraso-backend-1223
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add ability to change user password from Django admin
## Description
Add ability to change user password from Django admin.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `terraso_backend/apps/core/admin.py`
Content:
```
1 # Copyright © 2021-2023 Technology Matters
2 #
3 # This program is free software: you can redistribute it and/or modify
4 # it under the terms of the GNU Affero General Public License as published
5 # by the Free Software Foundation, either version 3 of the License, or
6 # (at your option) any later version.
7 #
8 # This program is distributed in the hope that it will be useful,
9 # but WITHOUT ANY WARRANTY; without even the implied warranty of
10 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 # GNU Affero General Public License for more details.
12 #
13 # You should have received a copy of the GNU Affero General Public License
14 # along with this program. If not, see https://www.gnu.org/licenses/.
15
16 from django.contrib import admin
17
18 from .models import (
19 Group,
20 Landscape,
21 LandscapeDevelopmentStrategy,
22 LandscapeGroup,
23 SharedResource,
24 TaxonomyTerm,
25 User,
26 UserPreference,
27 )
28
29
30 @admin.register(Group)
31 class GroupAdmin(admin.ModelAdmin):
32 list_display = ("name", "slug", "website", "created_at")
33
34 def get_queryset(self, request):
35 qs = super().get_queryset(request)
36 return qs.exclude(associated_landscapes__is_default_landscape_group=True)
37
38
39 @admin.register(Landscape)
40 class LandscapeAdmin(admin.ModelAdmin):
41 list_display = ("name", "slug", "location", "website", "created_at")
42 raw_id_fields = ("membership_list",)
43
44
45 class LandscapeDefaultGroup(Group):
46 class Meta:
47 proxy = True
48
49
50 @admin.register(LandscapeGroup)
51 class LandscapeGroupAdmin(admin.ModelAdmin):
52 list_display = ("landscape", "group")
53
54
55 class UserPreferenceInline(admin.TabularInline):
56 model = UserPreference
57
58
59 @admin.register(User)
60 class UserAdmin(admin.ModelAdmin):
61 list_display = ("email", "first_name", "last_name", "created_at", "is_staff")
62 inlines = [UserPreferenceInline]
63
64
65 @admin.register(TaxonomyTerm)
66 class TaxonomyTermAdmin(admin.ModelAdmin):
67 list_display = ("value_original", "type", "value_en", "value_es")
68
69
70 @admin.register(LandscapeDevelopmentStrategy)
71 class LandscapeDevelopmentStrategyAdmin(admin.ModelAdmin):
72 list_display = ("id", "landscape")
73
74
75 @admin.register(SharedResource)
76 class SharedResourceAdmin(admin.ModelAdmin):
77 list_display = ("id", "share_uuid", "share_access")
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/terraso_backend/apps/core/admin.py b/terraso_backend/apps/core/admin.py
--- a/terraso_backend/apps/core/admin.py
+++ b/terraso_backend/apps/core/admin.py
@@ -14,6 +14,7 @@
# along with this program. If not, see https://www.gnu.org/licenses/.
from django.contrib import admin
+from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin
from .models import (
Group,
@@ -57,9 +58,28 @@
@admin.register(User)
-class UserAdmin(admin.ModelAdmin):
+class UserAdmin(DjangoUserAdmin):
+ ordering = ("email",)
list_display = ("email", "first_name", "last_name", "created_at", "is_staff")
+ search_fields = ("email", "first_name", "last_name")
inlines = [UserPreferenceInline]
+ fieldsets = (
+ (None, {"fields": ("email", "password")}),
+ ("Personal info", {"fields": ("first_name", "last_name")}),
+ (
+ "Permissions",
+ {
+ "fields": (
+ "is_active",
+ "is_staff",
+ "is_superuser",
+ "groups",
+ "user_permissions",
+ ),
+ },
+ ),
+ ("Important dates", {"fields": ("last_login", "date_joined")}),
+ )
@admin.register(TaxonomyTerm)
|
{"golden_diff": "diff --git a/terraso_backend/apps/core/admin.py b/terraso_backend/apps/core/admin.py\n--- a/terraso_backend/apps/core/admin.py\n+++ b/terraso_backend/apps/core/admin.py\n@@ -14,6 +14,7 @@\n # along with this program. If not, see https://www.gnu.org/licenses/.\n \n from django.contrib import admin\n+from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin\n \n from .models import (\n Group,\n@@ -57,9 +58,28 @@\n \n \n @admin.register(User)\n-class UserAdmin(admin.ModelAdmin):\n+class UserAdmin(DjangoUserAdmin):\n+ ordering = (\"email\",)\n list_display = (\"email\", \"first_name\", \"last_name\", \"created_at\", \"is_staff\")\n+ search_fields = (\"email\", \"first_name\", \"last_name\")\n inlines = [UserPreferenceInline]\n+ fieldsets = (\n+ (None, {\"fields\": (\"email\", \"password\")}),\n+ (\"Personal info\", {\"fields\": (\"first_name\", \"last_name\")}),\n+ (\n+ \"Permissions\",\n+ {\n+ \"fields\": (\n+ \"is_active\",\n+ \"is_staff\",\n+ \"is_superuser\",\n+ \"groups\",\n+ \"user_permissions\",\n+ ),\n+ },\n+ ),\n+ (\"Important dates\", {\"fields\": (\"last_login\", \"date_joined\")}),\n+ )\n \n \n @admin.register(TaxonomyTerm)\n", "issue": "Add ability to change user password from Django admin\n## Description\r\nAdd ability to change user password from Django admin.\n", "before_files": [{"content": "# Copyright \u00a9 2021-2023 Technology Matters\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see https://www.gnu.org/licenses/.\n\nfrom django.contrib import admin\n\nfrom .models import (\n Group,\n Landscape,\n LandscapeDevelopmentStrategy,\n LandscapeGroup,\n SharedResource,\n TaxonomyTerm,\n User,\n UserPreference,\n)\n\n\[email protected](Group)\nclass GroupAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"slug\", \"website\", \"created_at\")\n\n def get_queryset(self, request):\n qs = super().get_queryset(request)\n return qs.exclude(associated_landscapes__is_default_landscape_group=True)\n\n\[email protected](Landscape)\nclass LandscapeAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"slug\", \"location\", \"website\", \"created_at\")\n raw_id_fields = (\"membership_list\",)\n\n\nclass LandscapeDefaultGroup(Group):\n class Meta:\n proxy = True\n\n\[email protected](LandscapeGroup)\nclass LandscapeGroupAdmin(admin.ModelAdmin):\n list_display = (\"landscape\", \"group\")\n\n\nclass UserPreferenceInline(admin.TabularInline):\n model = UserPreference\n\n\[email protected](User)\nclass UserAdmin(admin.ModelAdmin):\n list_display = (\"email\", \"first_name\", \"last_name\", \"created_at\", \"is_staff\")\n inlines = [UserPreferenceInline]\n\n\[email protected](TaxonomyTerm)\nclass TaxonomyTermAdmin(admin.ModelAdmin):\n list_display = (\"value_original\", \"type\", \"value_en\", \"value_es\")\n\n\[email protected](LandscapeDevelopmentStrategy)\nclass LandscapeDevelopmentStrategyAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"landscape\")\n\n\[email protected](SharedResource)\nclass SharedResourceAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"share_uuid\", \"share_access\")\n", "path": "terraso_backend/apps/core/admin.py"}], "after_files": [{"content": "# Copyright \u00a9 2021-2023 Technology Matters\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see https://www.gnu.org/licenses/.\n\nfrom django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin as DjangoUserAdmin\n\nfrom .models import (\n Group,\n Landscape,\n LandscapeDevelopmentStrategy,\n LandscapeGroup,\n SharedResource,\n TaxonomyTerm,\n User,\n UserPreference,\n)\n\n\[email protected](Group)\nclass GroupAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"slug\", \"website\", \"created_at\")\n\n def get_queryset(self, request):\n qs = super().get_queryset(request)\n return qs.exclude(associated_landscapes__is_default_landscape_group=True)\n\n\[email protected](Landscape)\nclass LandscapeAdmin(admin.ModelAdmin):\n list_display = (\"name\", \"slug\", \"location\", \"website\", \"created_at\")\n raw_id_fields = (\"membership_list\",)\n\n\nclass LandscapeDefaultGroup(Group):\n class Meta:\n proxy = True\n\n\[email protected](LandscapeGroup)\nclass LandscapeGroupAdmin(admin.ModelAdmin):\n list_display = (\"landscape\", \"group\")\n\n\nclass UserPreferenceInline(admin.TabularInline):\n model = UserPreference\n\n\[email protected](User)\nclass UserAdmin(DjangoUserAdmin):\n ordering = (\"email\",)\n list_display = (\"email\", \"first_name\", \"last_name\", \"created_at\", \"is_staff\")\n search_fields = (\"email\", \"first_name\", \"last_name\")\n inlines = [UserPreferenceInline]\n fieldsets = (\n (None, {\"fields\": (\"email\", \"password\")}),\n (\"Personal info\", {\"fields\": (\"first_name\", \"last_name\")}),\n (\n \"Permissions\",\n {\n \"fields\": (\n \"is_active\",\n \"is_staff\",\n \"is_superuser\",\n \"groups\",\n \"user_permissions\",\n ),\n },\n ),\n (\"Important dates\", {\"fields\": (\"last_login\", \"date_joined\")}),\n )\n\n\[email protected](TaxonomyTerm)\nclass TaxonomyTermAdmin(admin.ModelAdmin):\n list_display = (\"value_original\", \"type\", \"value_en\", \"value_es\")\n\n\[email protected](LandscapeDevelopmentStrategy)\nclass LandscapeDevelopmentStrategyAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"landscape\")\n\n\[email protected](SharedResource)\nclass SharedResourceAdmin(admin.ModelAdmin):\n list_display = (\"id\", \"share_uuid\", \"share_access\")\n", "path": "terraso_backend/apps/core/admin.py"}]}
| 944 | 321 |
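
What actually enables password changes in the patch above is switching the base class from `admin.ModelAdmin` to Django's `UserAdmin`, which supplies the hashed-password widget and the change-password form. A stripped-down version of that idea for an email-based custom user model is sketched below; the field names are illustrative and this is not a drop-in replacement for the Terraso admin module.

```python
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin as DjangoUserAdmin

User = get_user_model()


@admin.register(User)
class UserAdmin(DjangoUserAdmin):
    ordering = ("email",)
    list_display = ("email", "first_name", "last_name", "is_staff")
    search_fields = ("email", "first_name", "last_name")
    fieldsets = (
        # "password" renders the hash read-only plus a link to the change-password form
        (None, {"fields": ("email", "password")}),
        ("Personal info", {"fields": ("first_name", "last_name")}),
        ("Permissions", {"fields": ("is_active", "is_staff", "is_superuser", "groups")}),
    )
```
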
gh_patches_debug_26602 | rasdani/github-patches | git_diff | docker__docker-py-1263
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Build Image Missing Arguments
The build image function is missing some arguments that are present in the v1.24 api.
- shmsize - Size of /dev/shm in bytes. The size must be greater than 0. If omitted the system uses 64MB.
- labels – JSON map of string pairs for labels to set on the image.
See: https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/build-image-from-a-dockerfile
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docker/api/build.py`
Content:
```
1 import logging
2 import os
3 import re
4 import json
5
6 from .. import constants
7 from .. import errors
8 from .. import auth
9 from .. import utils
10
11
12 log = logging.getLogger(__name__)
13
14
15 class BuildApiMixin(object):
16 def build(self, path=None, tag=None, quiet=False, fileobj=None,
17 nocache=False, rm=False, stream=False, timeout=None,
18 custom_context=False, encoding=None, pull=False,
19 forcerm=False, dockerfile=None, container_limits=None,
20 decode=False, buildargs=None, gzip=False):
21 remote = context = None
22 headers = {}
23 container_limits = container_limits or {}
24 if path is None and fileobj is None:
25 raise TypeError("Either path or fileobj needs to be provided.")
26 if gzip and encoding is not None:
27 raise errors.DockerException(
28 'Can not use custom encoding if gzip is enabled'
29 )
30
31 for key in container_limits.keys():
32 if key not in constants.CONTAINER_LIMITS_KEYS:
33 raise errors.DockerException(
34 'Invalid container_limits key {0}'.format(key)
35 )
36
37 if custom_context:
38 if not fileobj:
39 raise TypeError("You must specify fileobj with custom_context")
40 context = fileobj
41 elif fileobj is not None:
42 context = utils.mkbuildcontext(fileobj)
43 elif path.startswith(('http://', 'https://',
44 'git://', 'github.com/', 'git@')):
45 remote = path
46 elif not os.path.isdir(path):
47 raise TypeError("You must specify a directory to build in path")
48 else:
49 dockerignore = os.path.join(path, '.dockerignore')
50 exclude = None
51 if os.path.exists(dockerignore):
52 with open(dockerignore, 'r') as f:
53 exclude = list(filter(bool, f.read().splitlines()))
54 context = utils.tar(
55 path, exclude=exclude, dockerfile=dockerfile, gzip=gzip
56 )
57 encoding = 'gzip' if gzip else encoding
58
59 if utils.compare_version('1.8', self._version) >= 0:
60 stream = True
61
62 if dockerfile and utils.compare_version('1.17', self._version) < 0:
63 raise errors.InvalidVersion(
64 'dockerfile was only introduced in API version 1.17'
65 )
66
67 if utils.compare_version('1.19', self._version) < 0:
68 pull = 1 if pull else 0
69
70 u = self._url('/build')
71 params = {
72 't': tag,
73 'remote': remote,
74 'q': quiet,
75 'nocache': nocache,
76 'rm': rm,
77 'forcerm': forcerm,
78 'pull': pull,
79 'dockerfile': dockerfile,
80 }
81 params.update(container_limits)
82
83 if buildargs:
84 if utils.version_gte(self._version, '1.21'):
85 params.update({'buildargs': json.dumps(buildargs)})
86 else:
87 raise errors.InvalidVersion(
88 'buildargs was only introduced in API version 1.21'
89 )
90
91 if context is not None:
92 headers = {'Content-Type': 'application/tar'}
93 if encoding:
94 headers['Content-Encoding'] = encoding
95
96 if utils.compare_version('1.9', self._version) >= 0:
97 self._set_auth_headers(headers)
98
99 response = self._post(
100 u,
101 data=context,
102 params=params,
103 headers=headers,
104 stream=stream,
105 timeout=timeout,
106 )
107
108 if context is not None and not custom_context:
109 context.close()
110
111 if stream:
112 return self._stream_helper(response, decode=decode)
113 else:
114 output = self._result(response)
115 srch = r'Successfully built ([0-9a-f]+)'
116 match = re.search(srch, output)
117 if not match:
118 return None, output
119 return match.group(1), output
120
121 def _set_auth_headers(self, headers):
122 log.debug('Looking for auth config')
123
124 # If we don't have any auth data so far, try reloading the config
125 # file one more time in case anything showed up in there.
126 if not self._auth_configs:
127 log.debug("No auth config in memory - loading from filesystem")
128 self._auth_configs = auth.load_config()
129
130 # Send the full auth configuration (if any exists), since the build
131 # could use any (or all) of the registries.
132 if self._auth_configs:
133 log.debug(
134 'Sending auth config ({0})'.format(
135 ', '.join(repr(k) for k in self._auth_configs.keys())
136 )
137 )
138
139 if utils.compare_version('1.19', self._version) >= 0:
140 headers['X-Registry-Config'] = auth.encode_header(
141 self._auth_configs
142 )
143 else:
144 headers['X-Registry-Config'] = auth.encode_header({
145 'configs': self._auth_configs
146 })
147 else:
148 log.debug('No auth config found')
149
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docker/api/build.py b/docker/api/build.py
--- a/docker/api/build.py
+++ b/docker/api/build.py
@@ -17,7 +17,8 @@
nocache=False, rm=False, stream=False, timeout=None,
custom_context=False, encoding=None, pull=False,
forcerm=False, dockerfile=None, container_limits=None,
- decode=False, buildargs=None, gzip=False):
+ decode=False, buildargs=None, gzip=False, shmsize=None,
+ labels=None):
remote = context = None
headers = {}
container_limits = container_limits or {}
@@ -88,6 +89,22 @@
'buildargs was only introduced in API version 1.21'
)
+ if shmsize:
+ if utils.version_gte(self._version, '1.22'):
+ params.update({'shmsize': shmsize})
+ else:
+ raise errors.InvalidVersion(
+ 'shmsize was only introduced in API version 1.22'
+ )
+
+ if labels:
+ if utils.version_gte(self._version, '1.23'):
+ params.update({'labels': json.dumps(labels)})
+ else:
+ raise errors.InvalidVersion(
+ 'labels was only introduced in API version 1.23'
+ )
+
if context is not None:
headers = {'Content-Type': 'application/tar'}
if encoding:
|
{"golden_diff": "diff --git a/docker/api/build.py b/docker/api/build.py\n--- a/docker/api/build.py\n+++ b/docker/api/build.py\n@@ -17,7 +17,8 @@\n nocache=False, rm=False, stream=False, timeout=None,\n custom_context=False, encoding=None, pull=False,\n forcerm=False, dockerfile=None, container_limits=None,\n- decode=False, buildargs=None, gzip=False):\n+ decode=False, buildargs=None, gzip=False, shmsize=None,\n+ labels=None):\n remote = context = None\n headers = {}\n container_limits = container_limits or {}\n@@ -88,6 +89,22 @@\n 'buildargs was only introduced in API version 1.21'\n )\n \n+ if shmsize:\n+ if utils.version_gte(self._version, '1.22'):\n+ params.update({'shmsize': shmsize})\n+ else:\n+ raise errors.InvalidVersion(\n+ 'shmsize was only introduced in API version 1.22'\n+ )\n+\n+ if labels:\n+ if utils.version_gte(self._version, '1.23'):\n+ params.update({'labels': json.dumps(labels)})\n+ else:\n+ raise errors.InvalidVersion(\n+ 'labels was only introduced in API version 1.23'\n+ )\n+\n if context is not None:\n headers = {'Content-Type': 'application/tar'}\n if encoding:\n", "issue": "Build Image Missing Arguments\nThe build image function is missing some arguments that are present in the v1.24 api.\n- shmsize - Size of /dev/shm in bytes. The size must be greater than 0. If omitted the system uses 64MB.\n- labels \u2013 JSON map of string pairs for labels to set on the image.\n\nSee: https://docs.docker.com/engine/reference/api/docker_remote_api_v1.24/#/build-image-from-a-dockerfile\n\n", "before_files": [{"content": "import logging\nimport os\nimport re\nimport json\n\nfrom .. import constants\nfrom .. import errors\nfrom .. import auth\nfrom .. import utils\n\n\nlog = logging.getLogger(__name__)\n\n\nclass BuildApiMixin(object):\n def build(self, path=None, tag=None, quiet=False, fileobj=None,\n nocache=False, rm=False, stream=False, timeout=None,\n custom_context=False, encoding=None, pull=False,\n forcerm=False, dockerfile=None, container_limits=None,\n decode=False, buildargs=None, gzip=False):\n remote = context = None\n headers = {}\n container_limits = container_limits or {}\n if path is None and fileobj is None:\n raise TypeError(\"Either path or fileobj needs to be provided.\")\n if gzip and encoding is not None:\n raise errors.DockerException(\n 'Can not use custom encoding if gzip is enabled'\n )\n\n for key in container_limits.keys():\n if key not in constants.CONTAINER_LIMITS_KEYS:\n raise errors.DockerException(\n 'Invalid container_limits key {0}'.format(key)\n )\n\n if custom_context:\n if not fileobj:\n raise TypeError(\"You must specify fileobj with custom_context\")\n context = fileobj\n elif fileobj is not None:\n context = utils.mkbuildcontext(fileobj)\n elif path.startswith(('http://', 'https://',\n 'git://', 'github.com/', 'git@')):\n remote = path\n elif not os.path.isdir(path):\n raise TypeError(\"You must specify a directory to build in path\")\n else:\n dockerignore = os.path.join(path, '.dockerignore')\n exclude = None\n if os.path.exists(dockerignore):\n with open(dockerignore, 'r') as f:\n exclude = list(filter(bool, f.read().splitlines()))\n context = utils.tar(\n path, exclude=exclude, dockerfile=dockerfile, gzip=gzip\n )\n encoding = 'gzip' if gzip else encoding\n\n if utils.compare_version('1.8', self._version) >= 0:\n stream = True\n\n if dockerfile and utils.compare_version('1.17', self._version) < 0:\n raise errors.InvalidVersion(\n 'dockerfile was only introduced in API version 1.17'\n )\n\n if utils.compare_version('1.19', self._version) 
< 0:\n pull = 1 if pull else 0\n\n u = self._url('/build')\n params = {\n 't': tag,\n 'remote': remote,\n 'q': quiet,\n 'nocache': nocache,\n 'rm': rm,\n 'forcerm': forcerm,\n 'pull': pull,\n 'dockerfile': dockerfile,\n }\n params.update(container_limits)\n\n if buildargs:\n if utils.version_gte(self._version, '1.21'):\n params.update({'buildargs': json.dumps(buildargs)})\n else:\n raise errors.InvalidVersion(\n 'buildargs was only introduced in API version 1.21'\n )\n\n if context is not None:\n headers = {'Content-Type': 'application/tar'}\n if encoding:\n headers['Content-Encoding'] = encoding\n\n if utils.compare_version('1.9', self._version) >= 0:\n self._set_auth_headers(headers)\n\n response = self._post(\n u,\n data=context,\n params=params,\n headers=headers,\n stream=stream,\n timeout=timeout,\n )\n\n if context is not None and not custom_context:\n context.close()\n\n if stream:\n return self._stream_helper(response, decode=decode)\n else:\n output = self._result(response)\n srch = r'Successfully built ([0-9a-f]+)'\n match = re.search(srch, output)\n if not match:\n return None, output\n return match.group(1), output\n\n def _set_auth_headers(self, headers):\n log.debug('Looking for auth config')\n\n # If we don't have any auth data so far, try reloading the config\n # file one more time in case anything showed up in there.\n if not self._auth_configs:\n log.debug(\"No auth config in memory - loading from filesystem\")\n self._auth_configs = auth.load_config()\n\n # Send the full auth configuration (if any exists), since the build\n # could use any (or all) of the registries.\n if self._auth_configs:\n log.debug(\n 'Sending auth config ({0})'.format(\n ', '.join(repr(k) for k in self._auth_configs.keys())\n )\n )\n\n if utils.compare_version('1.19', self._version) >= 0:\n headers['X-Registry-Config'] = auth.encode_header(\n self._auth_configs\n )\n else:\n headers['X-Registry-Config'] = auth.encode_header({\n 'configs': self._auth_configs\n })\n else:\n log.debug('No auth config found')\n", "path": "docker/api/build.py"}], "after_files": [{"content": "import logging\nimport os\nimport re\nimport json\n\nfrom .. import constants\nfrom .. import errors\nfrom .. import auth\nfrom .. 
import utils\n\n\nlog = logging.getLogger(__name__)\n\n\nclass BuildApiMixin(object):\n def build(self, path=None, tag=None, quiet=False, fileobj=None,\n nocache=False, rm=False, stream=False, timeout=None,\n custom_context=False, encoding=None, pull=False,\n forcerm=False, dockerfile=None, container_limits=None,\n decode=False, buildargs=None, gzip=False, shmsize=None,\n labels=None):\n remote = context = None\n headers = {}\n container_limits = container_limits or {}\n if path is None and fileobj is None:\n raise TypeError(\"Either path or fileobj needs to be provided.\")\n if gzip and encoding is not None:\n raise errors.DockerException(\n 'Can not use custom encoding if gzip is enabled'\n )\n\n for key in container_limits.keys():\n if key not in constants.CONTAINER_LIMITS_KEYS:\n raise errors.DockerException(\n 'Invalid container_limits key {0}'.format(key)\n )\n\n if custom_context:\n if not fileobj:\n raise TypeError(\"You must specify fileobj with custom_context\")\n context = fileobj\n elif fileobj is not None:\n context = utils.mkbuildcontext(fileobj)\n elif path.startswith(('http://', 'https://',\n 'git://', 'github.com/', 'git@')):\n remote = path\n elif not os.path.isdir(path):\n raise TypeError(\"You must specify a directory to build in path\")\n else:\n dockerignore = os.path.join(path, '.dockerignore')\n exclude = None\n if os.path.exists(dockerignore):\n with open(dockerignore, 'r') as f:\n exclude = list(filter(bool, f.read().splitlines()))\n context = utils.tar(\n path, exclude=exclude, dockerfile=dockerfile, gzip=gzip\n )\n encoding = 'gzip' if gzip else encoding\n\n if utils.compare_version('1.8', self._version) >= 0:\n stream = True\n\n if dockerfile and utils.compare_version('1.17', self._version) < 0:\n raise errors.InvalidVersion(\n 'dockerfile was only introduced in API version 1.17'\n )\n\n if utils.compare_version('1.19', self._version) < 0:\n pull = 1 if pull else 0\n\n u = self._url('/build')\n params = {\n 't': tag,\n 'remote': remote,\n 'q': quiet,\n 'nocache': nocache,\n 'rm': rm,\n 'forcerm': forcerm,\n 'pull': pull,\n 'dockerfile': dockerfile,\n }\n params.update(container_limits)\n\n if buildargs:\n if utils.version_gte(self._version, '1.21'):\n params.update({'buildargs': json.dumps(buildargs)})\n else:\n raise errors.InvalidVersion(\n 'buildargs was only introduced in API version 1.21'\n )\n\n if shmsize:\n if utils.version_gte(self._version, '1.22'):\n params.update({'shmsize': shmsize})\n else:\n raise errors.InvalidVersion(\n 'shmsize was only introduced in API version 1.22'\n )\n\n if labels:\n if utils.version_gte(self._version, '1.23'):\n params.update({'labels': json.dumps(labels)})\n else:\n raise errors.InvalidVersion(\n 'labels was only introduced in API version 1.23'\n )\n\n if context is not None:\n headers = {'Content-Type': 'application/tar'}\n if encoding:\n headers['Content-Encoding'] = encoding\n\n if utils.compare_version('1.9', self._version) >= 0:\n self._set_auth_headers(headers)\n\n response = self._post(\n u,\n data=context,\n params=params,\n headers=headers,\n stream=stream,\n timeout=timeout,\n )\n\n if context is not None and not custom_context:\n context.close()\n\n if stream:\n return self._stream_helper(response, decode=decode)\n else:\n output = self._result(response)\n srch = r'Successfully built ([0-9a-f]+)'\n match = re.search(srch, output)\n if not match:\n return None, output\n return match.group(1), output\n\n def _set_auth_headers(self, headers):\n log.debug('Looking for auth config')\n\n # If we don't have any auth 
data so far, try reloading the config\n # file one more time in case anything showed up in there.\n if not self._auth_configs:\n log.debug(\"No auth config in memory - loading from filesystem\")\n self._auth_configs = auth.load_config()\n\n # Send the full auth configuration (if any exists), since the build\n # could use any (or all) of the registries.\n if self._auth_configs:\n log.debug(\n 'Sending auth config ({0})'.format(\n ', '.join(repr(k) for k in self._auth_configs.keys())\n )\n )\n\n if utils.compare_version('1.19', self._version) >= 0:\n headers['X-Registry-Config'] = auth.encode_header(\n self._auth_configs\n )\n else:\n headers['X-Registry-Config'] = auth.encode_header({\n 'configs': self._auth_configs\n })\n else:\n log.debug('No auth config found')\n", "path": "docker/api/build.py"}]}
| 1,793 | 321 |
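
The golden diff for this row adds `shmsize` and `labels` keyword arguments to `build()`, gated on API versions 1.22 and 1.23. A minimal usage sketch of the patched call follows; it assumes the docker-py 1.x low-level client (`docker.Client`, renamed `APIClient` in later releases), a local daemon speaking a recent enough API, and placeholder tag and label values that are not part of the dataset row.

```python
import docker

# Illustrative only: the client class, tag, and label values are assumptions,
# not taken from the dataset row above.
client = docker.Client(version='1.23')

build_log = client.build(
    path='.',                      # build context directory
    tag='example/app:latest',
    decode=True,                   # yield parsed JSON chunks from the stream
    shmsize=134217728,             # 128 MB /dev/shm, requires API >= 1.22
    labels={'maintainer': 'ops'},  # image labels, requires API >= 1.23
)

for chunk in build_log:            # build output is streamed on API >= 1.8
    if 'stream' in chunk:
        print(chunk['stream'], end='')
```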
gh_patches_debug_27671
|
rasdani/github-patches
|
git_diff
|
ocadotechnology__codeforlife-portal-417
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
From django administration page, in Portal, can't access Teachers or Students
Trying to access a Student or Teacher from the administration page leads to an error:
Failed to load resource: the server responded with a status of 500 (OK)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `portal/admin.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 # Code for Life
3 #
4 # Copyright (C) 2016, Ocado Innovation Limited
5 #
6 # This program is free software: you can redistribute it and/or modify
7 # it under the terms of the GNU Affero General Public License as
8 # published by the Free Software Foundation, either version 3 of the
9 # License, or (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful,
12 # but WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 # GNU Affero General Public License for more details.
15 #
16 # You should have received a copy of the GNU Affero General Public License
17 # along with this program. If not, see <http://www.gnu.org/licenses/>.
18 #
19 # ADDITIONAL TERMS – Section 7 GNU General Public Licence
20 #
21 # This licence does not grant any right, title or interest in any “Ocado” logos,
22 # trade names or the trademark “Ocado” or any other trademarks or domain names
23 # owned by Ocado Innovation Limited or the Ocado group of companies or any other
24 # distinctive brand features of “Ocado” as may be secured from time to time. You
25 # must not distribute any modification of this program using the trademark
26 # “Ocado” or claim any affiliation or association with Ocado or its employees.
27 #
28 # You are not authorised to use the name Ocado (or any of its trade names) or
29 # the names of any author or contributor in advertising or for publicity purposes
30 # pertaining to the distribution of this program, without the prior written
31 # authorisation of Ocado.
32 #
33 # Any propagation, distribution or conveyance of this program must include this
34 # copyright notice and these terms. You must not misrepresent the origins of this
35 # program; modified versions of the program must be marked as such and not
36 # identified as the original program.
37 from django.contrib import admin
38 from django.contrib.auth.models import User
39 from django.contrib.auth.admin import UserAdmin
40
41
42 from portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification
43
44
45 class ClassAdmin(admin.ModelAdmin):
46 search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']
47 list_filter = ['teacher']
48
49
50 class SchoolAdmin(admin.ModelAdmin):
51 search_fields = ['name', 'country', 'postcode', 'town']
52 list_filter = ['postcode', 'country']
53
54
55 class StudentAdmin(admin.ModelAdmin):
56 search_fields = ['new_user__first_name', 'new_user__last_name']
57 list_filter = ['class_field', 'class_field__teacher']
58
59
60 class TeacherAdmin(admin.ModelAdmin):
61 search_fields = ['new_user__first_name', 'new_user__last_name']
62 list_filter = ['school']
63
64
65 class UserProfileAdmin(admin.ModelAdmin):
66 search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']
67 list_filter = ['user__date_joined']
68 list_display = ['user', 'joined_recently']
69
70
71 class EmailVerificationAdmin(admin.ModelAdmin):
72 search_fields = ['new_user']
73
74
75 UserAdmin.list_display += ('date_joined',)
76 UserAdmin.list_filter += ('date_joined',)
77
78
79 admin.site.register(Class, ClassAdmin)
80 admin.site.register(Student, StudentAdmin)
81 admin.site.register(Guardian)
82 admin.site.register(Teacher, TeacherAdmin)
83 admin.site.register(School, SchoolAdmin)
84 admin.site.unregister(User)
85 admin.site.register(User, UserAdmin)
86 admin.site.register(UserProfile, UserProfileAdmin)
87 admin.site.register(FrontPageNews)
88 admin.site.register(EmailVerification, EmailVerificationAdmin)
89
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/portal/admin.py b/portal/admin.py
--- a/portal/admin.py
+++ b/portal/admin.py
@@ -45,6 +45,7 @@
class ClassAdmin(admin.ModelAdmin):
search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']
list_filter = ['teacher']
+ readonly_fields = ['teacher']
class SchoolAdmin(admin.ModelAdmin):
@@ -55,17 +56,22 @@
class StudentAdmin(admin.ModelAdmin):
search_fields = ['new_user__first_name', 'new_user__last_name']
list_filter = ['class_field', 'class_field__teacher']
+ readonly_fields = ['user', 'new_user']
+ raw_id_fields = ['class_field', 'pending_class_request']
class TeacherAdmin(admin.ModelAdmin):
search_fields = ['new_user__first_name', 'new_user__last_name']
list_filter = ['school']
+ readonly_fields = ['user', 'new_user']
+ raw_id_fields = ['school', 'pending_join_request']
class UserProfileAdmin(admin.ModelAdmin):
search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']
list_filter = ['user__date_joined']
list_display = ['user', 'joined_recently']
+ readonly_fields = ['user']
class EmailVerificationAdmin(admin.ModelAdmin):
|
{"golden_diff": "diff --git a/portal/admin.py b/portal/admin.py\n--- a/portal/admin.py\n+++ b/portal/admin.py\n@@ -45,6 +45,7 @@\n class ClassAdmin(admin.ModelAdmin):\n search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']\n list_filter = ['teacher']\n+ readonly_fields = ['teacher']\n \n \n class SchoolAdmin(admin.ModelAdmin):\n@@ -55,17 +56,22 @@\n class StudentAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['class_field', 'class_field__teacher']\n+ readonly_fields = ['user', 'new_user']\n+ raw_id_fields = ['class_field', 'pending_class_request']\n \n \n class TeacherAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['school']\n+ readonly_fields = ['user', 'new_user']\n+ raw_id_fields = ['school', 'pending_join_request']\n \n \n class UserProfileAdmin(admin.ModelAdmin):\n search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n+ readonly_fields = ['user']\n \n \n class EmailVerificationAdmin(admin.ModelAdmin):\n", "issue": "From django administration page, in Portal, can't access Teachers or Students\nTrying to access a Student or Teacher from the administration page leads to an error:\nFailed to load resource: the server responded with a status of 500 (OK)\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2016, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. 
You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.admin import UserAdmin\n\n\nfrom portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification\n\n\nclass ClassAdmin(admin.ModelAdmin):\n search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']\n list_filter = ['teacher']\n\n\nclass SchoolAdmin(admin.ModelAdmin):\n search_fields = ['name', 'country', 'postcode', 'town']\n list_filter = ['postcode', 'country']\n\n\nclass StudentAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['class_field', 'class_field__teacher']\n\n\nclass TeacherAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['school']\n\n\nclass UserProfileAdmin(admin.ModelAdmin):\n search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n\n\nclass EmailVerificationAdmin(admin.ModelAdmin):\n search_fields = ['new_user']\n\n\nUserAdmin.list_display += ('date_joined',)\nUserAdmin.list_filter += ('date_joined',)\n\n\nadmin.site.register(Class, ClassAdmin)\nadmin.site.register(Student, StudentAdmin)\nadmin.site.register(Guardian)\nadmin.site.register(Teacher, TeacherAdmin)\nadmin.site.register(School, SchoolAdmin)\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(UserProfile, UserProfileAdmin)\nadmin.site.register(FrontPageNews)\nadmin.site.register(EmailVerification, EmailVerificationAdmin)\n", "path": "portal/admin.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n# Code for Life\n#\n# Copyright (C) 2016, Ocado Innovation Limited\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n#\n# ADDITIONAL TERMS \u2013 Section 7 GNU General Public Licence\n#\n# This licence does not grant any right, title or interest in any \u201cOcado\u201d logos,\n# trade names or the trademark \u201cOcado\u201d or any other trademarks or domain names\n# owned by Ocado Innovation Limited or the Ocado group of companies or any other\n# distinctive brand features of \u201cOcado\u201d as may be secured from time to time. 
You\n# must not distribute any modification of this program using the trademark\n# \u201cOcado\u201d or claim any affiliation or association with Ocado or its employees.\n#\n# You are not authorised to use the name Ocado (or any of its trade names) or\n# the names of any author or contributor in advertising or for publicity purposes\n# pertaining to the distribution of this program, without the prior written\n# authorisation of Ocado.\n#\n# Any propagation, distribution or conveyance of this program must include this\n# copyright notice and these terms. You must not misrepresent the origins of this\n# program; modified versions of the program must be marked as such and not\n# identified as the original program.\nfrom django.contrib import admin\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.admin import UserAdmin\n\n\nfrom portal.models import Class, Student, Guardian, Teacher, School, UserProfile, FrontPageNews, EmailVerification\n\n\nclass ClassAdmin(admin.ModelAdmin):\n search_fields = ['name', 'teacher__new_user__first_name', 'teacher__new_user__last_name']\n list_filter = ['teacher']\n readonly_fields = ['teacher']\n\n\nclass SchoolAdmin(admin.ModelAdmin):\n search_fields = ['name', 'country', 'postcode', 'town']\n list_filter = ['postcode', 'country']\n\n\nclass StudentAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['class_field', 'class_field__teacher']\n readonly_fields = ['user', 'new_user']\n raw_id_fields = ['class_field', 'pending_class_request']\n\n\nclass TeacherAdmin(admin.ModelAdmin):\n search_fields = ['new_user__first_name', 'new_user__last_name']\n list_filter = ['school']\n readonly_fields = ['user', 'new_user']\n raw_id_fields = ['school', 'pending_join_request']\n\n\nclass UserProfileAdmin(admin.ModelAdmin):\n search_fields = ['user__first_name', 'user__last_name', 'new_username', 'user__date_joined']\n list_filter = ['user__date_joined']\n list_display = ['user', 'joined_recently']\n readonly_fields = ['user']\n\n\nclass EmailVerificationAdmin(admin.ModelAdmin):\n search_fields = ['new_user']\n\n\nUserAdmin.list_display += ('date_joined',)\nUserAdmin.list_filter += ('date_joined',)\n\n\nadmin.site.register(Class, ClassAdmin)\nadmin.site.register(Student, StudentAdmin)\nadmin.site.register(Guardian)\nadmin.site.register(Teacher, TeacherAdmin)\nadmin.site.register(School, SchoolAdmin)\nadmin.site.unregister(User)\nadmin.site.register(User, UserAdmin)\nadmin.site.register(UserProfile, UserProfileAdmin)\nadmin.site.register(FrontPageNews)\nadmin.site.register(EmailVerification, EmailVerificationAdmin)\n", "path": "portal/admin.py"}]}
| 1,266 | 303 |
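
The fix in this row swaps the default admin widgets for `readonly_fields` and `raw_id_fields`, so the Student and Teacher change forms no longer render a select box populated with every related user, class, or school, which is the usual cause of a 500 on large tables. A minimal sketch of the same pattern is shown below; the model and field names are hypothetical and chosen only to illustrate the idea.

```python
from django.contrib import admin

from myapp.models import Enrolment  # hypothetical model, for illustration only


class EnrolmentAdmin(admin.ModelAdmin):
    # Show the related user as plain text and the related class as a raw ID
    # box instead of a dropdown that loads every row of the related table.
    readonly_fields = ['user']
    raw_id_fields = ['class_field']


admin.site.register(Enrolment, EnrolmentAdmin)
```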
gh_patches_debug_22348
|
rasdani/github-patches
|
git_diff
|
hedyorg__hedy-687
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Link to latests shared program is empty

(see link at the bottom)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `website/teacher.py`
Content:
```
1 from website.auth import requires_login, is_teacher, current_user
2 import utils
3 import uuid
4 from flask import request, jsonify, redirect
5 from flask_helpers import render_template
6 import os
7 import hedyweb
8 TRANSLATIONS = hedyweb.Translations ()
9 from config import config
10 cookie_name = config ['session'] ['cookie_name']
11
12 def routes (app, database, requested_lang):
13 global DATABASE
14 DATABASE = database
15
16 from app import render_main_menu
17
18 @app.route('/class/<class_id>', methods=['GET'])
19 @requires_login
20 def get_class (user, class_id):
21 if not is_teacher (request):
22 return 'Only teachers can retrieve classes', 403
23 Class = DATABASE.get_class (class_id)
24 if not Class or Class ['teacher'] != user ['username']:
25 return 'No such class', 404
26 students = []
27 for student_username in Class.get ('students', []):
28 student = DATABASE.user_by_username (student_username)
29 programs = DATABASE.programs_for_user(student_username)
30 highest_level = max(program['level'] for program in programs) if len(programs) else 0
31 sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))
32 latest_shared = sorted_public_programs[-1] if sorted_public_programs else None
33 students.append ({'username': student_username, 'last_login': utils.mstoisostring (student ['last_login']), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})
34
35 if utils.is_testing_request (request):
36 return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})
37 return render_template ('class-overview.html', lang=requested_lang (), auth=TRANSLATIONS.get_translations (requested_lang (), 'Auth'), menu=render_main_menu('my-profile'), username=current_user (request) ['username'], current_page='my-profile', class_info={'students': students, 'link': os.getenv ('BASE_URL') + '/hedy/l/' + Class ['link'], 'name': Class ['name'], 'id': Class ['id']})
38
39 @app.route('/class', methods=['POST'])
40 @requires_login
41 def create_class (user):
42 if not is_teacher (request):
43 return 'Only teachers can create classes', 403
44
45 body = request.json
46 # Validations
47 if not isinstance(body, dict):
48 return 'body must be an object', 400
49 if not isinstance(body.get('name'), str):
50 return 'name must be a string', 400
51
52 Class = {
53 'id': uuid.uuid4().hex,
54 'date': utils.timems (),
55 'teacher': user ['username'],
56 'link': utils.random_id_generator (7),
57 'name': body ['name']
58 }
59
60 DATABASE.store_class (Class)
61
62 return {}, 200
63
64 @app.route('/class/<class_id>', methods=['PUT'])
65 @requires_login
66 def update_class (user, class_id):
67 if not is_teacher (request):
68 return 'Only teachers can update classes', 403
69
70 body = request.json
71 # Validations
72 if not isinstance(body, dict):
73 return 'body must be an object', 400
74 if not isinstance(body.get('name'), str):
75 return 'name must be a string', 400
76
77 Class = DATABASE.get_class (class_id)
78 if not Class or Class ['teacher'] != user ['username']:
79 return 'No such class', 404
80
81 Class = DATABASE.update_class (class_id, body ['name'])
82
83 return {}, 200
84
85 @app.route('/class/<class_id>', methods=['DELETE'])
86 @requires_login
87 def delete_class (user, class_id):
88 Class = DATABASE.get_class (class_id)
89 if not Class or Class ['teacher'] != user ['username']:
90 return 'No such class', 404
91
92 DATABASE.delete_class (Class)
93
94 return {}, 200
95
96 @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])
97 def prejoin_class (class_id, link):
98 Class = DATABASE.get_class (class_id)
99 if not Class or Class ['link'] != link:
100 return 'No such class', 404
101 user = {}
102 if request.cookies.get (cookie_name):
103 token = DATABASE.get_token(request.cookies.get (cookie_name))
104 if token:
105 user = DATABASE.user_by_username(token ['username'])
106
107 return render_template ('class-prejoin.html', lang=requested_lang (), auth=TRANSLATIONS.get_translations (requested_lang (), 'Auth'), menu=render_main_menu('my-profile'), username=current_user (request) ['username'], current_page='my-profile', class_info={'link': os.getenv ('BASE_URL') + '/class/' + Class ['id'] + '/join/' + Class ['link'] + '?lang=' + requested_lang (), 'name': Class ['name']})
108
109 @app.route('/class/<class_id>/join/<link>', methods=['GET'])
110 @requires_login
111 def join_class (user, class_id, link):
112 Class = DATABASE.get_class (class_id)
113 if not Class or Class ['link'] != link:
114 return 'No such class', 404
115
116 DATABASE.add_student_to_class (Class ['id'], user ['username'])
117
118 return redirect(request.url.replace('/class/' + class_id + '/join/' + link, '/my-profile'), code=302)
119
120 @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])
121 @requires_login
122 def leave_class (user, class_id, student_id):
123
124 Class = DATABASE.get_class (class_id)
125 if not Class or Class ['teacher'] != user ['username']:
126 return 'No such class', 404
127
128 DATABASE.remove_student_from_class (Class ['id'], student_id)
129
130 return {}, 200
131
132 @app.route('/hedy/l/<link_id>', methods=['GET'])
133 def resolve_class_link (link_id):
134 Class = DATABASE.resolve_class_link (link_id)
135 if not Class:
136 return 'Invalid link', 404
137 return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)
138
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/website/teacher.py b/website/teacher.py
--- a/website/teacher.py
+++ b/website/teacher.py
@@ -29,7 +29,11 @@
programs = DATABASE.programs_for_user(student_username)
highest_level = max(program['level'] for program in programs) if len(programs) else 0
sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))
- latest_shared = sorted_public_programs[-1] if sorted_public_programs else None
+ if sorted_public_programs:
+ latest_shared = sorted_public_programs[-1]
+ latest_shared['link'] = os.getenv ('BASE_URL') + f"hedy/{latest_shared['id']}/view"
+ else:
+ latest_shared = None
students.append ({'username': student_username, 'last_login': utils.mstoisostring (student ['last_login']), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})
if utils.is_testing_request (request):
|
{"golden_diff": "diff --git a/website/teacher.py b/website/teacher.py\n--- a/website/teacher.py\n+++ b/website/teacher.py\n@@ -29,7 +29,11 @@\n programs = DATABASE.programs_for_user(student_username)\n highest_level = max(program['level'] for program in programs) if len(programs) else 0\n sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))\n- latest_shared = sorted_public_programs[-1] if sorted_public_programs else None\n+ if sorted_public_programs:\n+ latest_shared = sorted_public_programs[-1]\n+ latest_shared['link'] = os.getenv ('BASE_URL') + f\"hedy/{latest_shared['id']}/view\"\n+ else:\n+ latest_shared = None\n students.append ({'username': student_username, 'last_login': utils.mstoisostring (student ['last_login']), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})\n \n if utils.is_testing_request (request):\n", "issue": "Link to latests shared program is empty\n\r\n\r\n(see link at the bottom)\n", "before_files": [{"content": "from website.auth import requires_login, is_teacher, current_user\nimport utils\nimport uuid\nfrom flask import request, jsonify, redirect\nfrom flask_helpers import render_template\nimport os\nimport hedyweb\nTRANSLATIONS = hedyweb.Translations ()\nfrom config import config\ncookie_name = config ['session'] ['cookie_name']\n\ndef routes (app, database, requested_lang):\n global DATABASE\n DATABASE = database\n\n from app import render_main_menu\n\n @app.route('/class/<class_id>', methods=['GET'])\n @requires_login\n def get_class (user, class_id):\n if not is_teacher (request):\n return 'Only teachers can retrieve classes', 403\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n students = []\n for student_username in Class.get ('students', []):\n student = DATABASE.user_by_username (student_username)\n programs = DATABASE.programs_for_user(student_username)\n highest_level = max(program['level'] for program in programs) if len(programs) else 0\n sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))\n latest_shared = sorted_public_programs[-1] if sorted_public_programs else None\n students.append ({'username': student_username, 'last_login': utils.mstoisostring (student ['last_login']), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})\n\n if utils.is_testing_request (request):\n return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n return render_template ('class-overview.html', lang=requested_lang (), auth=TRANSLATIONS.get_translations (requested_lang (), 'Auth'), menu=render_main_menu('my-profile'), username=current_user (request) ['username'], current_page='my-profile', class_info={'students': students, 'link': os.getenv ('BASE_URL') + '/hedy/l/' + Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n\n @app.route('/class', methods=['POST'])\n @requires_login\n def create_class (user):\n if not is_teacher (request):\n return 'Only teachers can create classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = {\n 'id': uuid.uuid4().hex,\n 'date': utils.timems (),\n 'teacher': user ['username'],\n 'link': utils.random_id_generator 
(7),\n 'name': body ['name']\n }\n\n DATABASE.store_class (Class)\n\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['PUT'])\n @requires_login\n def update_class (user, class_id):\n if not is_teacher (request):\n return 'Only teachers can update classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n Class = DATABASE.update_class (class_id, body ['name'])\n\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['DELETE'])\n @requires_login\n def delete_class (user, class_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.delete_class (Class)\n\n return {}, 200\n\n @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])\n def prejoin_class (class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return 'No such class', 404\n user = {}\n if request.cookies.get (cookie_name):\n token = DATABASE.get_token(request.cookies.get (cookie_name))\n if token:\n user = DATABASE.user_by_username(token ['username'])\n\n return render_template ('class-prejoin.html', lang=requested_lang (), auth=TRANSLATIONS.get_translations (requested_lang (), 'Auth'), menu=render_main_menu('my-profile'), username=current_user (request) ['username'], current_page='my-profile', class_info={'link': os.getenv ('BASE_URL') + '/class/' + Class ['id'] + '/join/' + Class ['link'] + '?lang=' + requested_lang (), 'name': Class ['name']})\n\n @app.route('/class/<class_id>/join/<link>', methods=['GET'])\n @requires_login\n def join_class (user, class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return 'No such class', 404\n\n DATABASE.add_student_to_class (Class ['id'], user ['username'])\n\n return redirect(request.url.replace('/class/' + class_id + '/join/' + link, '/my-profile'), code=302)\n\n @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])\n @requires_login\n def leave_class (user, class_id, student_id):\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.remove_student_from_class (Class ['id'], student_id)\n\n return {}, 200\n\n @app.route('/hedy/l/<link_id>', methods=['GET'])\n def resolve_class_link (link_id):\n Class = DATABASE.resolve_class_link (link_id)\n if not Class:\n return 'Invalid link', 404\n return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)\n", "path": "website/teacher.py"}], "after_files": [{"content": "from website.auth import requires_login, is_teacher, current_user\nimport utils\nimport uuid\nfrom flask import request, jsonify, redirect\nfrom flask_helpers import render_template\nimport os\nimport hedyweb\nTRANSLATIONS = hedyweb.Translations ()\nfrom config import config\ncookie_name = config ['session'] ['cookie_name']\n\ndef routes (app, database, requested_lang):\n global DATABASE\n DATABASE = database\n\n from app import render_main_menu\n\n @app.route('/class/<class_id>', methods=['GET'])\n @requires_login\n def get_class (user, class_id):\n if not is_teacher (request):\n return 'Only teachers can retrieve classes', 403\n Class = 
DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n students = []\n for student_username in Class.get ('students', []):\n student = DATABASE.user_by_username (student_username)\n programs = DATABASE.programs_for_user(student_username)\n highest_level = max(program['level'] for program in programs) if len(programs) else 0\n sorted_public_programs = list(sorted([program for program in programs if program.get ('public')], key=lambda p: p['date']))\n if sorted_public_programs:\n latest_shared = sorted_public_programs[-1]\n latest_shared['link'] = os.getenv ('BASE_URL') + f\"hedy/{latest_shared['id']}/view\"\n else:\n latest_shared = None\n students.append ({'username': student_username, 'last_login': utils.mstoisostring (student ['last_login']), 'programs': len (programs), 'highest_level': highest_level, 'latest_shared': latest_shared})\n\n if utils.is_testing_request (request):\n return jsonify ({'students': students, 'link': Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n return render_template ('class-overview.html', lang=requested_lang (), auth=TRANSLATIONS.get_translations (requested_lang (), 'Auth'), menu=render_main_menu('my-profile'), username=current_user (request) ['username'], current_page='my-profile', class_info={'students': students, 'link': os.getenv ('BASE_URL') + '/hedy/l/' + Class ['link'], 'name': Class ['name'], 'id': Class ['id']})\n\n @app.route('/class', methods=['POST'])\n @requires_login\n def create_class (user):\n if not is_teacher (request):\n return 'Only teachers can create classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = {\n 'id': uuid.uuid4().hex,\n 'date': utils.timems (),\n 'teacher': user ['username'],\n 'link': utils.random_id_generator (7),\n 'name': body ['name']\n }\n\n DATABASE.store_class (Class)\n\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['PUT'])\n @requires_login\n def update_class (user, class_id):\n if not is_teacher (request):\n return 'Only teachers can update classes', 403\n\n body = request.json\n # Validations\n if not isinstance(body, dict):\n return 'body must be an object', 400\n if not isinstance(body.get('name'), str):\n return 'name must be a string', 400\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n Class = DATABASE.update_class (class_id, body ['name'])\n\n return {}, 200\n\n @app.route('/class/<class_id>', methods=['DELETE'])\n @requires_login\n def delete_class (user, class_id):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.delete_class (Class)\n\n return {}, 200\n\n @app.route('/class/<class_id>/prejoin/<link>', methods=['GET'])\n def prejoin_class (class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return 'No such class', 404\n user = {}\n if request.cookies.get (cookie_name):\n token = DATABASE.get_token(request.cookies.get (cookie_name))\n if token:\n user = DATABASE.user_by_username(token ['username'])\n\n return render_template ('class-prejoin.html', lang=requested_lang (), auth=TRANSLATIONS.get_translations (requested_lang (), 'Auth'), menu=render_main_menu('my-profile'), username=current_user (request) ['username'], 
current_page='my-profile', class_info={'link': os.getenv ('BASE_URL') + '/class/' + Class ['id'] + '/join/' + Class ['link'] + '?lang=' + requested_lang (), 'name': Class ['name']})\n\n @app.route('/class/<class_id>/join/<link>', methods=['GET'])\n @requires_login\n def join_class (user, class_id, link):\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['link'] != link:\n return 'No such class', 404\n\n DATABASE.add_student_to_class (Class ['id'], user ['username'])\n\n return redirect(request.url.replace('/class/' + class_id + '/join/' + link, '/my-profile'), code=302)\n\n @app.route('/class/<class_id>/student/<student_id>', methods=['DELETE'])\n @requires_login\n def leave_class (user, class_id, student_id):\n\n Class = DATABASE.get_class (class_id)\n if not Class or Class ['teacher'] != user ['username']:\n return 'No such class', 404\n\n DATABASE.remove_student_from_class (Class ['id'], student_id)\n\n return {}, 200\n\n @app.route('/hedy/l/<link_id>', methods=['GET'])\n def resolve_class_link (link_id):\n Class = DATABASE.resolve_class_link (link_id)\n if not Class:\n return 'Invalid link', 404\n return redirect(request.url.replace('/hedy/l/' + link_id, '/class/' + Class ['id'] + '/prejoin/' + link_id), code=302)\n", "path": "website/teacher.py"}]}
| 2,063 | 242 |
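
The patch in this row annotates the most recent public program with a `link` key so the class-overview template has a concrete URL to render instead of an empty href. The standalone sketch below mirrors that logic outside Flask; the `BASE_URL` value (assumed to end with a slash, as the patch expects) and the sample programs are made up for illustration.

```python
import os

os.environ.setdefault('BASE_URL', 'https://hedy.example.org/')  # illustrative value


def latest_shared_program(programs):
    """Return the newest public program annotated with a view link, or None."""
    public = sorted((p for p in programs if p.get('public')), key=lambda p: p['date'])
    if not public:
        return None
    latest = public[-1]
    latest['link'] = os.getenv('BASE_URL') + f"hedy/{latest['id']}/view"
    return latest


programs = [
    {'id': 'abc123', 'public': 1, 'date': 1},
    {'id': 'def456', 'public': 0, 'date': 2},
]
print(latest_shared_program(programs)['link'])  # .../hedy/abc123/view
```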
gh_patches_debug_27236
|
rasdani/github-patches
|
git_diff
|
redis__redis-py-2324
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add support for WITHSUFFIXTRIE to FT.CREATE
RediSearch now supports another option (WITHSUFFIXTRIE) during index creation. We need to extend the [FT.CREATE](https://sourcegraph.com/github.com/RediSearch/RediSearch/-/blob/docs/commands/ft.create.md) calls to support this
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `redis/commands/search/field.py`
Content:
```
1 from typing import List
2
3 from redis import DataError
4
5
6 class Field:
7
8 NUMERIC = "NUMERIC"
9 TEXT = "TEXT"
10 WEIGHT = "WEIGHT"
11 GEO = "GEO"
12 TAG = "TAG"
13 VECTOR = "VECTOR"
14 SORTABLE = "SORTABLE"
15 NOINDEX = "NOINDEX"
16 AS = "AS"
17
18 def __init__(
19 self,
20 name: str,
21 args: List[str] = None,
22 sortable: bool = False,
23 no_index: bool = False,
24 as_name: str = None,
25 ):
26 if args is None:
27 args = []
28 self.name = name
29 self.args = args
30 self.args_suffix = list()
31 self.as_name = as_name
32
33 if sortable:
34 self.args_suffix.append(Field.SORTABLE)
35 if no_index:
36 self.args_suffix.append(Field.NOINDEX)
37
38 if no_index and not sortable:
39 raise ValueError("Non-Sortable non-Indexable fields are ignored")
40
41 def append_arg(self, value):
42 self.args.append(value)
43
44 def redis_args(self):
45 args = [self.name]
46 if self.as_name:
47 args += [self.AS, self.as_name]
48 args += self.args
49 args += self.args_suffix
50 return args
51
52
53 class TextField(Field):
54 """
55 TextField is used to define a text field in a schema definition
56 """
57
58 NOSTEM = "NOSTEM"
59 PHONETIC = "PHONETIC"
60
61 def __init__(
62 self,
63 name: str,
64 weight: float = 1.0,
65 no_stem: bool = False,
66 phonetic_matcher: str = None,
67 **kwargs,
68 ):
69 Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
70
71 if no_stem:
72 Field.append_arg(self, self.NOSTEM)
73 if phonetic_matcher and phonetic_matcher in [
74 "dm:en",
75 "dm:fr",
76 "dm:pt",
77 "dm:es",
78 ]:
79 Field.append_arg(self, self.PHONETIC)
80 Field.append_arg(self, phonetic_matcher)
81
82
83 class NumericField(Field):
84 """
85 NumericField is used to define a numeric field in a schema definition
86 """
87
88 def __init__(self, name: str, **kwargs):
89 Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)
90
91
92 class GeoField(Field):
93 """
94 GeoField is used to define a geo-indexing field in a schema definition
95 """
96
97 def __init__(self, name: str, **kwargs):
98 Field.__init__(self, name, args=[Field.GEO], **kwargs)
99
100
101 class TagField(Field):
102 """
103 TagField is a tag-indexing field with simpler compression and tokenization.
104 See http://redisearch.io/Tags/
105 """
106
107 SEPARATOR = "SEPARATOR"
108 CASESENSITIVE = "CASESENSITIVE"
109
110 def __init__(
111 self, name: str, separator: str = ",", case_sensitive: bool = False, **kwargs
112 ):
113 args = [Field.TAG, self.SEPARATOR, separator]
114 if case_sensitive:
115 args.append(self.CASESENSITIVE)
116
117 Field.__init__(self, name, args=args, **kwargs)
118
119
120 class VectorField(Field):
121 """
122 Allows vector similarity queries against the value in this attribute.
123 See https://oss.redis.com/redisearch/Vectors/#vector_fields.
124 """
125
126 def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):
127 """
128 Create Vector Field. Notice that Vector cannot have sortable or no_index tag,
129 although it's also a Field.
130
131 ``name`` is the name of the field.
132
133 ``algorithm`` can be "FLAT" or "HNSW".
134
135 ``attributes`` each algorithm can have specific attributes. Some of them
136 are mandatory and some of them are optional. See
137 https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm
138 for more information.
139 """
140 sort = kwargs.get("sortable", False)
141 noindex = kwargs.get("no_index", False)
142
143 if sort or noindex:
144 raise DataError("Cannot set 'sortable' or 'no_index' in Vector fields.")
145
146 if algorithm.upper() not in ["FLAT", "HNSW"]:
147 raise DataError(
148 "Realtime vector indexing supporting 2 Indexing Methods:"
149 "'FLAT' and 'HNSW'."
150 )
151
152 attr_li = []
153
154 for key, value in attributes.items():
155 attr_li.extend([key, value])
156
157 Field.__init__(
158 self, name, args=[Field.VECTOR, algorithm, len(attr_li), *attr_li], **kwargs
159 )
160
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py
--- a/redis/commands/search/field.py
+++ b/redis/commands/search/field.py
@@ -64,6 +64,7 @@
weight: float = 1.0,
no_stem: bool = False,
phonetic_matcher: str = None,
+ withsuffixtrie: bool = False,
**kwargs,
):
Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)
@@ -78,6 +79,8 @@
]:
Field.append_arg(self, self.PHONETIC)
Field.append_arg(self, phonetic_matcher)
+ if withsuffixtrie:
+ Field.append_arg(self, "WITHSUFFIXTRIE")
class NumericField(Field):
@@ -108,11 +111,18 @@
CASESENSITIVE = "CASESENSITIVE"
def __init__(
- self, name: str, separator: str = ",", case_sensitive: bool = False, **kwargs
+ self,
+ name: str,
+ separator: str = ",",
+ case_sensitive: bool = False,
+ withsuffixtrie: bool = False,
+ **kwargs,
):
args = [Field.TAG, self.SEPARATOR, separator]
if case_sensitive:
args.append(self.CASESENSITIVE)
+ if withsuffixtrie:
+ args.append("WITHSUFFIXTRIE")
Field.__init__(self, name, args=args, **kwargs)
|
{"golden_diff": "diff --git a/redis/commands/search/field.py b/redis/commands/search/field.py\n--- a/redis/commands/search/field.py\n+++ b/redis/commands/search/field.py\n@@ -64,6 +64,7 @@\n weight: float = 1.0,\n no_stem: bool = False,\n phonetic_matcher: str = None,\n+ withsuffixtrie: bool = False,\n **kwargs,\n ):\n Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)\n@@ -78,6 +79,8 @@\n ]:\n Field.append_arg(self, self.PHONETIC)\n Field.append_arg(self, phonetic_matcher)\n+ if withsuffixtrie:\n+ Field.append_arg(self, \"WITHSUFFIXTRIE\")\n \n \n class NumericField(Field):\n@@ -108,11 +111,18 @@\n CASESENSITIVE = \"CASESENSITIVE\"\n \n def __init__(\n- self, name: str, separator: str = \",\", case_sensitive: bool = False, **kwargs\n+ self,\n+ name: str,\n+ separator: str = \",\",\n+ case_sensitive: bool = False,\n+ withsuffixtrie: bool = False,\n+ **kwargs,\n ):\n args = [Field.TAG, self.SEPARATOR, separator]\n if case_sensitive:\n args.append(self.CASESENSITIVE)\n+ if withsuffixtrie:\n+ args.append(\"WITHSUFFIXTRIE\")\n \n Field.__init__(self, name, args=args, **kwargs)\n", "issue": "Add support for WITHSUFFIXTRIE to FT.CREATE \nRediSearch now supports another option (WITHSUFFIXTRIE) during index creation. We need to extend the [FT.CREATE](https://sourcegraph.com/github.com/RediSearch/RediSearch/-/blob/docs/commands/ft.create.md) calls to support this\n", "before_files": [{"content": "from typing import List\n\nfrom redis import DataError\n\n\nclass Field:\n\n NUMERIC = \"NUMERIC\"\n TEXT = \"TEXT\"\n WEIGHT = \"WEIGHT\"\n GEO = \"GEO\"\n TAG = \"TAG\"\n VECTOR = \"VECTOR\"\n SORTABLE = \"SORTABLE\"\n NOINDEX = \"NOINDEX\"\n AS = \"AS\"\n\n def __init__(\n self,\n name: str,\n args: List[str] = None,\n sortable: bool = False,\n no_index: bool = False,\n as_name: str = None,\n ):\n if args is None:\n args = []\n self.name = name\n self.args = args\n self.args_suffix = list()\n self.as_name = as_name\n\n if sortable:\n self.args_suffix.append(Field.SORTABLE)\n if no_index:\n self.args_suffix.append(Field.NOINDEX)\n\n if no_index and not sortable:\n raise ValueError(\"Non-Sortable non-Indexable fields are ignored\")\n\n def append_arg(self, value):\n self.args.append(value)\n\n def redis_args(self):\n args = [self.name]\n if self.as_name:\n args += [self.AS, self.as_name]\n args += self.args\n args += self.args_suffix\n return args\n\n\nclass TextField(Field):\n \"\"\"\n TextField is used to define a text field in a schema definition\n \"\"\"\n\n NOSTEM = \"NOSTEM\"\n PHONETIC = \"PHONETIC\"\n\n def __init__(\n self,\n name: str,\n weight: float = 1.0,\n no_stem: bool = False,\n phonetic_matcher: str = None,\n **kwargs,\n ):\n Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)\n\n if no_stem:\n Field.append_arg(self, self.NOSTEM)\n if phonetic_matcher and phonetic_matcher in [\n \"dm:en\",\n \"dm:fr\",\n \"dm:pt\",\n \"dm:es\",\n ]:\n Field.append_arg(self, self.PHONETIC)\n Field.append_arg(self, phonetic_matcher)\n\n\nclass NumericField(Field):\n \"\"\"\n NumericField is used to define a numeric field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)\n\n\nclass GeoField(Field):\n \"\"\"\n GeoField is used to define a geo-indexing field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.GEO], **kwargs)\n\n\nclass TagField(Field):\n \"\"\"\n TagField is a tag-indexing field with 
simpler compression and tokenization.\n See http://redisearch.io/Tags/\n \"\"\"\n\n SEPARATOR = \"SEPARATOR\"\n CASESENSITIVE = \"CASESENSITIVE\"\n\n def __init__(\n self, name: str, separator: str = \",\", case_sensitive: bool = False, **kwargs\n ):\n args = [Field.TAG, self.SEPARATOR, separator]\n if case_sensitive:\n args.append(self.CASESENSITIVE)\n\n Field.__init__(self, name, args=args, **kwargs)\n\n\nclass VectorField(Field):\n \"\"\"\n Allows vector similarity queries against the value in this attribute.\n See https://oss.redis.com/redisearch/Vectors/#vector_fields.\n \"\"\"\n\n def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):\n \"\"\"\n Create Vector Field. Notice that Vector cannot have sortable or no_index tag,\n although it's also a Field.\n\n ``name`` is the name of the field.\n\n ``algorithm`` can be \"FLAT\" or \"HNSW\".\n\n ``attributes`` each algorithm can have specific attributes. Some of them\n are mandatory and some of them are optional. See\n https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm\n for more information.\n \"\"\"\n sort = kwargs.get(\"sortable\", False)\n noindex = kwargs.get(\"no_index\", False)\n\n if sort or noindex:\n raise DataError(\"Cannot set 'sortable' or 'no_index' in Vector fields.\")\n\n if algorithm.upper() not in [\"FLAT\", \"HNSW\"]:\n raise DataError(\n \"Realtime vector indexing supporting 2 Indexing Methods:\"\n \"'FLAT' and 'HNSW'.\"\n )\n\n attr_li = []\n\n for key, value in attributes.items():\n attr_li.extend([key, value])\n\n Field.__init__(\n self, name, args=[Field.VECTOR, algorithm, len(attr_li), *attr_li], **kwargs\n )\n", "path": "redis/commands/search/field.py"}], "after_files": [{"content": "from typing import List\n\nfrom redis import DataError\n\n\nclass Field:\n\n NUMERIC = \"NUMERIC\"\n TEXT = \"TEXT\"\n WEIGHT = \"WEIGHT\"\n GEO = \"GEO\"\n TAG = \"TAG\"\n VECTOR = \"VECTOR\"\n SORTABLE = \"SORTABLE\"\n NOINDEX = \"NOINDEX\"\n AS = \"AS\"\n\n def __init__(\n self,\n name: str,\n args: List[str] = None,\n sortable: bool = False,\n no_index: bool = False,\n as_name: str = None,\n ):\n if args is None:\n args = []\n self.name = name\n self.args = args\n self.args_suffix = list()\n self.as_name = as_name\n\n if sortable:\n self.args_suffix.append(Field.SORTABLE)\n if no_index:\n self.args_suffix.append(Field.NOINDEX)\n\n if no_index and not sortable:\n raise ValueError(\"Non-Sortable non-Indexable fields are ignored\")\n\n def append_arg(self, value):\n self.args.append(value)\n\n def redis_args(self):\n args = [self.name]\n if self.as_name:\n args += [self.AS, self.as_name]\n args += self.args\n args += self.args_suffix\n return args\n\n\nclass TextField(Field):\n \"\"\"\n TextField is used to define a text field in a schema definition\n \"\"\"\n\n NOSTEM = \"NOSTEM\"\n PHONETIC = \"PHONETIC\"\n\n def __init__(\n self,\n name: str,\n weight: float = 1.0,\n no_stem: bool = False,\n phonetic_matcher: str = None,\n withsuffixtrie: bool = False,\n **kwargs,\n ):\n Field.__init__(self, name, args=[Field.TEXT, Field.WEIGHT, weight], **kwargs)\n\n if no_stem:\n Field.append_arg(self, self.NOSTEM)\n if phonetic_matcher and phonetic_matcher in [\n \"dm:en\",\n \"dm:fr\",\n \"dm:pt\",\n \"dm:es\",\n ]:\n Field.append_arg(self, self.PHONETIC)\n Field.append_arg(self, phonetic_matcher)\n if withsuffixtrie:\n Field.append_arg(self, \"WITHSUFFIXTRIE\")\n\n\nclass NumericField(Field):\n \"\"\"\n NumericField is used to define a numeric field in a schema definition\n 
\"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.NUMERIC], **kwargs)\n\n\nclass GeoField(Field):\n \"\"\"\n GeoField is used to define a geo-indexing field in a schema definition\n \"\"\"\n\n def __init__(self, name: str, **kwargs):\n Field.__init__(self, name, args=[Field.GEO], **kwargs)\n\n\nclass TagField(Field):\n \"\"\"\n TagField is a tag-indexing field with simpler compression and tokenization.\n See http://redisearch.io/Tags/\n \"\"\"\n\n SEPARATOR = \"SEPARATOR\"\n CASESENSITIVE = \"CASESENSITIVE\"\n\n def __init__(\n self,\n name: str,\n separator: str = \",\",\n case_sensitive: bool = False,\n withsuffixtrie: bool = False,\n **kwargs,\n ):\n args = [Field.TAG, self.SEPARATOR, separator]\n if case_sensitive:\n args.append(self.CASESENSITIVE)\n if withsuffixtrie:\n args.append(\"WITHSUFFIXTRIE\")\n\n Field.__init__(self, name, args=args, **kwargs)\n\n\nclass VectorField(Field):\n \"\"\"\n Allows vector similarity queries against the value in this attribute.\n See https://oss.redis.com/redisearch/Vectors/#vector_fields.\n \"\"\"\n\n def __init__(self, name: str, algorithm: str, attributes: dict, **kwargs):\n \"\"\"\n Create Vector Field. Notice that Vector cannot have sortable or no_index tag,\n although it's also a Field.\n\n ``name`` is the name of the field.\n\n ``algorithm`` can be \"FLAT\" or \"HNSW\".\n\n ``attributes`` each algorithm can have specific attributes. Some of them\n are mandatory and some of them are optional. See\n https://oss.redis.com/redisearch/master/Vectors/#specific_creation_attributes_per_algorithm\n for more information.\n \"\"\"\n sort = kwargs.get(\"sortable\", False)\n noindex = kwargs.get(\"no_index\", False)\n\n if sort or noindex:\n raise DataError(\"Cannot set 'sortable' or 'no_index' in Vector fields.\")\n\n if algorithm.upper() not in [\"FLAT\", \"HNSW\"]:\n raise DataError(\n \"Realtime vector indexing supporting 2 Indexing Methods:\"\n \"'FLAT' and 'HNSW'.\"\n )\n\n attr_li = []\n\n for key, value in attributes.items():\n attr_li.extend([key, value])\n\n Field.__init__(\n self, name, args=[Field.VECTOR, algorithm, len(attr_li), *attr_li], **kwargs\n )\n", "path": "redis/commands/search/field.py"}]}
| 1,765 | 353 |
gh_patches_debug_37669
|
rasdani/github-patches
|
git_diff
|
electricitymaps__electricitymaps-contrib-1577
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add JP prices
See https://github.com/tmrowco/electricitymap-contrib/pull/1543#issuecomment-411281685 by @tmslaine
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `parsers/JP.py`
Content:
```
1 #!/usr/bin/env python3
2 # coding=utf-8
3 import logging
4 # The arrow library is used to handle datetimes
5 import arrow
6 import pandas as pd
7 from . import occtonet
8
9 # Abbreviations
10 # JP-HKD : Hokkaido
11 # JP-TH : Tohoku
12 # JP-TK : Tokyo area
13 # JP-CB : Chubu
14 # JP-HR : Hokuriku
15 # JP-KN : Kansai
16 # JP-SK : Shikoku
17 # JP-KY : Kyushu
18 # JP-ON : Okinawa
19
20 def fetch_production(zone_key='JP-TK', session=None, target_datetime=None,
21 logger=logging.getLogger(__name__)):
22 """
23 Calculates production from consumption and imports for a given area
24 All production is mapped to unknown
25 """
26 if target_datetime:
27 raise NotImplementedError(
28 'This parser is not yet able to parse past dates')
29 exch_map = {
30 'JP-HKD':['JP-TH'],
31 'JP-TH':['JP-TK'],
32 'JP-TK':['JP-TH', 'JP-CB'],
33 'JP-CB':['JP-TK', 'JP-HR', 'JP-KN'],
34 'JP-HR':['JP-CB', 'JP-KN'],
35 'JP-KN':['JP-CB', 'JP-HR', 'JP-SK', 'JP-CG'],
36 'JP-SK':['JP-KN', 'JP-CG'],
37 'JP-CG':['JP-KN', 'JP-SK', 'JP-KY']
38 }
39 df = fetch_consumption_df(zone_key, target_datetime)
40 df['imports'] = 0
41 for zone in exch_map[zone_key]:
42 df2 = occtonet.fetch_exchange(zone_key, zone, target_datetime)
43 df2 = pd.DataFrame(df2)
44 exchname = df2.loc[0, 'sortedZoneKeys']
45 df2 = df2[['datetime', 'netFlow']]
46 df2.columns = ['datetime', exchname]
47 df = pd.merge(df, df2, how='inner', on='datetime')
48 if exchname.split('->')[-1] == zone_key:
49 df['imports'] = df['imports']+df[exchname]
50 else:
51 df['imports'] = df['imports']-df[exchname]
52 df['prod'] = df['cons']-df['imports']
53 df = df[['datetime', 'prod']]
54 # add a row to production for each entry in the dictionary:
55 sources = {
56 'JP-HKD':'denkiyoho.hepco.co.jp',
57 'JP-TH':'setsuden.tohoku-epco.co.jp',
58 'JP-TK':'www.tepco.co.jp',
59 'JP-CB':'denki-yoho.chuden.jp',
60 'JP-HR':'www.rikuden.co.jp/denki-yoho',
61 'JP-KN':'www.kepco.co.jp',
62 'JP-SK':'www.energia.co.jp',
63 'JP-CG':'www.yonden.co.jp'
64 }
65 datalist = []
66 for i in range(df.shape[0]):
67 data = {
68 'zoneKey': zone_key,
69 'datetime': df.loc[i, 'datetime'].to_pydatetime(),
70 'production': {
71 'biomass': None,
72 'coal': None,
73 'gas': None,
74 'hydro': None,
75 'nuclear': None,
76 'oil': None,
77 'solar': None,
78 'wind': None,
79 'geothermal': None,
80 'unknown': df.loc[i, 'prod']
81 },
82 'storage': {},
83 'source': ['occtonet.or.jp', sources[zone_key]]
84 }
85 datalist.append(data)
86 return datalist
87
88
89 def fetch_consumption_df(zone_key='JP-TK', target_datetime=None,
90 logger=logging.getLogger(__name__)):
91 """
92 Returns the consumption for an area as a pandas DataFrame
93 """
94 datestamp = arrow.get(target_datetime).to('Asia/Tokyo').strftime('%Y%m%d')
95 consumption_url = {
96 'JP-HKD': 'http://denkiyoho.hepco.co.jp/area/data/juyo_01_{}.csv'.format(datestamp),
97 'JP-TH': 'http://setsuden.tohoku-epco.co.jp/common/demand/juyo_02_{}.csv'.format(datestamp),
98 'JP-TK': 'http://www.tepco.co.jp/forecast/html/images/juyo-j.csv',
99 'JP-HR': 'http://www.rikuden.co.jp/denki-yoho/csv/juyo_05_{}.csv'.format(datestamp),
100 'JP-CB': 'http://denki-yoho.chuden.jp/denki_yoho_content_data/juyo_cepco003.csv',
101 'JP-KN': 'http://www.kepco.co.jp/yamasou/juyo1_kansai.csv',
102 'JP-CG': 'http://www.energia.co.jp/jukyuu/sys/juyo_07_{}.csv'.format(datestamp),
103 'JP-SK': 'http://www.yonden.co.jp/denkiyoho/juyo_shikoku.csv'
104 }
105 # First roughly 40 rows of the consumption files have hourly data,
106 # the parser skips to the rows with 5-min actual values
107 if zone_key == 'JP-KN':
108 startrow = 44
109 else:
110 startrow = 42
111 df = pd.read_csv(consumption_url[zone_key], skiprows=list(range(startrow)),
112 encoding='shift-jis')
113 df.columns = ['Date', 'Time', 'cons']
114 # Convert 万kW to MW
115 df['cons'] = 10*df['cons']
116 df = df.dropna()
117 df['datetime'] = df.apply(parse_dt, axis=1)
118 df = df[['datetime', 'cons']]
119 return df
120
121 def parse_dt(row):
122 """
123 Parses timestamps from date and time
124 """
125 return arrow.get(' '.join([row['Date'], row['Time']]).replace('/', '-'),
126 'YYYY-M-D H:mm').replace(tzinfo='Asia/Tokyo').datetime
127
128 if __name__ == '__main__':
129 """Main method, never used by the Electricity Map backend, but handy for testing."""
130
131 print('fetch_production() ->')
132 print(fetch_production())
133
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/parsers/JP.py b/parsers/JP.py
--- a/parsers/JP.py
+++ b/parsers/JP.py
@@ -3,6 +3,7 @@
import logging
# The arrow library is used to handle datetimes
import arrow
+import datetime as dt
import pandas as pd
from . import occtonet
@@ -16,6 +17,8 @@
# JP-SK : Shikoku
# JP-KY : Kyushu
# JP-ON : Okinawa
+# JP-CG : Chūgoku
+
def fetch_production(zone_key='JP-TK', session=None, target_datetime=None,
logger=logging.getLogger(__name__)):
@@ -118,6 +121,47 @@
df = df[['datetime', 'cons']]
return df
+
+def fetch_price(zone_key='JP-TK', session=None, target_datetime=None,
+ logger=logging.getLogger(__name__)):
+ if target_datetime is None:
+ target_datetime = dt.datetime.now() + dt.timedelta(days=1)
+
+ # price files contain data for fiscal year and not calendar year.
+ if target_datetime.month <= 3:
+ fiscal_year = target_datetime.year - 1
+ else:
+ fiscal_year = target_datetime.year
+ url = 'http://www.jepx.org/market/excel/spot_{}.csv'.format(fiscal_year)
+ df = pd.read_csv(url)
+
+ df = df.iloc[:, [0, 1, 6, 7, 8, 9, 10, 11, 12, 13, 14]]
+ df.columns = ['Date', 'Period', 'JP-HKD', 'JP-TH', 'JP-TK', 'JP-CB',
+ 'JP-HR', 'JP-KN', 'JP-CG', 'JP-SK', 'JP-KY']
+
+ if zone_key not in df.columns[2:]:
+ return []
+
+ start = target_datetime - dt.timedelta(days=1)
+ df['Date'] = df['Date'].apply(lambda x: dt.datetime.strptime(x, '%Y/%m/%d'))
+ df = df[(df['Date'] >= start.date()) & (df['Date'] <= target_datetime.date())]
+
+ df['datetime'] = df.apply(lambda row: arrow.get(row['Date']).shift(
+ minutes=30 * (row['Period'] - 1)).replace(tzinfo='Asia/Tokyo'), axis=1)
+
+ data = list()
+ for row in df.iterrows():
+ data.append({
+ 'zoneKey': zone_key,
+ 'currency': 'JPY',
+ 'datetime': row[1]['datetime'].datetime,
+ 'price': row[1][zone_key],
+ 'source': 'jepx.org'
+ })
+
+ return data
+
+
def parse_dt(row):
"""
Parses timestamps from date and time
@@ -125,8 +169,11 @@
return arrow.get(' '.join([row['Date'], row['Time']]).replace('/', '-'),
'YYYY-M-D H:mm').replace(tzinfo='Asia/Tokyo').datetime
+
if __name__ == '__main__':
"""Main method, never used by the Electricity Map backend, but handy for testing."""
print('fetch_production() ->')
print(fetch_production())
+ print('fetch_price() ->')
+ print(fetch_price())
|
{"golden_diff": "diff --git a/parsers/JP.py b/parsers/JP.py\n--- a/parsers/JP.py\n+++ b/parsers/JP.py\n@@ -3,6 +3,7 @@\n import logging\n # The arrow library is used to handle datetimes\n import arrow\n+import datetime as dt\n import pandas as pd\n from . import occtonet\n \n@@ -16,6 +17,8 @@\n # JP-SK : Shikoku\n # JP-KY : Kyushu\n # JP-ON : Okinawa\n+# JP-CG : Ch\u016bgoku\n+\n \n def fetch_production(zone_key='JP-TK', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n@@ -118,6 +121,47 @@\n df = df[['datetime', 'cons']]\n return df\n \n+\n+def fetch_price(zone_key='JP-TK', session=None, target_datetime=None,\n+ logger=logging.getLogger(__name__)):\n+ if target_datetime is None:\n+ target_datetime = dt.datetime.now() + dt.timedelta(days=1)\n+\n+ # price files contain data for fiscal year and not calendar year.\n+ if target_datetime.month <= 3:\n+ fiscal_year = target_datetime.year - 1\n+ else:\n+ fiscal_year = target_datetime.year\n+ url = 'http://www.jepx.org/market/excel/spot_{}.csv'.format(fiscal_year)\n+ df = pd.read_csv(url)\n+\n+ df = df.iloc[:, [0, 1, 6, 7, 8, 9, 10, 11, 12, 13, 14]]\n+ df.columns = ['Date', 'Period', 'JP-HKD', 'JP-TH', 'JP-TK', 'JP-CB',\n+ 'JP-HR', 'JP-KN', 'JP-CG', 'JP-SK', 'JP-KY']\n+\n+ if zone_key not in df.columns[2:]:\n+ return []\n+\n+ start = target_datetime - dt.timedelta(days=1)\n+ df['Date'] = df['Date'].apply(lambda x: dt.datetime.strptime(x, '%Y/%m/%d'))\n+ df = df[(df['Date'] >= start.date()) & (df['Date'] <= target_datetime.date())]\n+\n+ df['datetime'] = df.apply(lambda row: arrow.get(row['Date']).shift(\n+ minutes=30 * (row['Period'] - 1)).replace(tzinfo='Asia/Tokyo'), axis=1)\n+\n+ data = list()\n+ for row in df.iterrows():\n+ data.append({\n+ 'zoneKey': zone_key,\n+ 'currency': 'JPY',\n+ 'datetime': row[1]['datetime'].datetime,\n+ 'price': row[1][zone_key],\n+ 'source': 'jepx.org'\n+ })\n+\n+ return data\n+\n+\n def parse_dt(row):\n \"\"\"\n Parses timestamps from date and time\n@@ -125,8 +169,11 @@\n return arrow.get(' '.join([row['Date'], row['Time']]).replace('/', '-'),\n 'YYYY-M-D H:mm').replace(tzinfo='Asia/Tokyo').datetime\n \n+\n if __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n \n print('fetch_production() ->')\n print(fetch_production())\n+ print('fetch_price() ->')\n+ print(fetch_price())\n", "issue": "Add JP prices\nSee https://github.com/tmrowco/electricitymap-contrib/pull/1543#issuecomment-411281685 by @tmslaine \n", "before_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\nimport logging\n# The arrow library is used to handle datetimes\nimport arrow\nimport pandas as pd\nfrom . 
import occtonet\n\n# Abbreviations\n# JP-HKD : Hokkaido\n# JP-TH : Tohoku\n# JP-TK : Tokyo area\n# JP-CB : Chubu\n# JP-HR : Hokuriku\n# JP-KN : Kansai\n# JP-SK : Shikoku\n# JP-KY : Kyushu\n# JP-ON : Okinawa\n\ndef fetch_production(zone_key='JP-TK', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n \"\"\"\n Calculates production from consumption and imports for a given area\n All production is mapped to unknown\n \"\"\"\n if target_datetime:\n raise NotImplementedError(\n 'This parser is not yet able to parse past dates')\n exch_map = {\n 'JP-HKD':['JP-TH'],\n 'JP-TH':['JP-TK'],\n 'JP-TK':['JP-TH', 'JP-CB'],\n 'JP-CB':['JP-TK', 'JP-HR', 'JP-KN'],\n 'JP-HR':['JP-CB', 'JP-KN'],\n 'JP-KN':['JP-CB', 'JP-HR', 'JP-SK', 'JP-CG'],\n 'JP-SK':['JP-KN', 'JP-CG'],\n 'JP-CG':['JP-KN', 'JP-SK', 'JP-KY']\n }\n df = fetch_consumption_df(zone_key, target_datetime)\n df['imports'] = 0\n for zone in exch_map[zone_key]:\n df2 = occtonet.fetch_exchange(zone_key, zone, target_datetime)\n df2 = pd.DataFrame(df2)\n exchname = df2.loc[0, 'sortedZoneKeys']\n df2 = df2[['datetime', 'netFlow']]\n df2.columns = ['datetime', exchname]\n df = pd.merge(df, df2, how='inner', on='datetime')\n if exchname.split('->')[-1] == zone_key:\n df['imports'] = df['imports']+df[exchname]\n else:\n df['imports'] = df['imports']-df[exchname]\n df['prod'] = df['cons']-df['imports']\n df = df[['datetime', 'prod']]\n # add a row to production for each entry in the dictionary:\n sources = {\n 'JP-HKD':'denkiyoho.hepco.co.jp',\n 'JP-TH':'setsuden.tohoku-epco.co.jp',\n 'JP-TK':'www.tepco.co.jp',\n 'JP-CB':'denki-yoho.chuden.jp',\n 'JP-HR':'www.rikuden.co.jp/denki-yoho',\n 'JP-KN':'www.kepco.co.jp',\n 'JP-SK':'www.energia.co.jp',\n 'JP-CG':'www.yonden.co.jp'\n }\n datalist = []\n for i in range(df.shape[0]):\n data = {\n 'zoneKey': zone_key,\n 'datetime': df.loc[i, 'datetime'].to_pydatetime(),\n 'production': {\n 'biomass': None,\n 'coal': None,\n 'gas': None,\n 'hydro': None,\n 'nuclear': None,\n 'oil': None,\n 'solar': None,\n 'wind': None,\n 'geothermal': None,\n 'unknown': df.loc[i, 'prod']\n },\n 'storage': {},\n 'source': ['occtonet.or.jp', sources[zone_key]]\n }\n datalist.append(data)\n return datalist\n\n\ndef fetch_consumption_df(zone_key='JP-TK', target_datetime=None,\n logger=logging.getLogger(__name__)):\n \"\"\"\n Returns the consumption for an area as a pandas DataFrame\n \"\"\"\n datestamp = arrow.get(target_datetime).to('Asia/Tokyo').strftime('%Y%m%d')\n consumption_url = {\n 'JP-HKD': 'http://denkiyoho.hepco.co.jp/area/data/juyo_01_{}.csv'.format(datestamp),\n 'JP-TH': 'http://setsuden.tohoku-epco.co.jp/common/demand/juyo_02_{}.csv'.format(datestamp),\n 'JP-TK': 'http://www.tepco.co.jp/forecast/html/images/juyo-j.csv',\n 'JP-HR': 'http://www.rikuden.co.jp/denki-yoho/csv/juyo_05_{}.csv'.format(datestamp),\n 'JP-CB': 'http://denki-yoho.chuden.jp/denki_yoho_content_data/juyo_cepco003.csv',\n 'JP-KN': 'http://www.kepco.co.jp/yamasou/juyo1_kansai.csv',\n 'JP-CG': 'http://www.energia.co.jp/jukyuu/sys/juyo_07_{}.csv'.format(datestamp),\n 'JP-SK': 'http://www.yonden.co.jp/denkiyoho/juyo_shikoku.csv'\n }\n # First roughly 40 rows of the consumption files have hourly data,\n # the parser skips to the rows with 5-min actual values \n if zone_key == 'JP-KN':\n startrow = 44\n else:\n startrow = 42\n df = pd.read_csv(consumption_url[zone_key], skiprows=list(range(startrow)),\n encoding='shift-jis')\n df.columns = ['Date', 'Time', 'cons']\n # Convert \u4e07kW to MW\n df['cons'] = 10*df['cons']\n df = 
df.dropna()\n df['datetime'] = df.apply(parse_dt, axis=1)\n df = df[['datetime', 'cons']]\n return df\n\ndef parse_dt(row):\n \"\"\"\n Parses timestamps from date and time\n \"\"\"\n return arrow.get(' '.join([row['Date'], row['Time']]).replace('/', '-'),\n 'YYYY-M-D H:mm').replace(tzinfo='Asia/Tokyo').datetime\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n", "path": "parsers/JP.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# coding=utf-8\nimport logging\n# The arrow library is used to handle datetimes\nimport arrow\nimport datetime as dt\nimport pandas as pd\nfrom . import occtonet\n\n# Abbreviations\n# JP-HKD : Hokkaido\n# JP-TH : Tohoku\n# JP-TK : Tokyo area\n# JP-CB : Chubu\n# JP-HR : Hokuriku\n# JP-KN : Kansai\n# JP-SK : Shikoku\n# JP-KY : Kyushu\n# JP-ON : Okinawa\n# JP-CG : Ch\u016bgoku\n\n\ndef fetch_production(zone_key='JP-TK', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n \"\"\"\n Calculates production from consumption and imports for a given area\n All production is mapped to unknown\n \"\"\"\n if target_datetime:\n raise NotImplementedError(\n 'This parser is not yet able to parse past dates')\n exch_map = {\n 'JP-HKD':['JP-TH'],\n 'JP-TH':['JP-TK'],\n 'JP-TK':['JP-TH', 'JP-CB'],\n 'JP-CB':['JP-TK', 'JP-HR', 'JP-KN'],\n 'JP-HR':['JP-CB', 'JP-KN'],\n 'JP-KN':['JP-CB', 'JP-HR', 'JP-SK', 'JP-CG'],\n 'JP-SK':['JP-KN', 'JP-CG'],\n 'JP-CG':['JP-KN', 'JP-SK', 'JP-KY']\n }\n df = fetch_consumption_df(zone_key, target_datetime)\n df['imports'] = 0\n for zone in exch_map[zone_key]:\n df2 = occtonet.fetch_exchange(zone_key, zone, target_datetime)\n df2 = pd.DataFrame(df2)\n exchname = df2.loc[0, 'sortedZoneKeys']\n df2 = df2[['datetime', 'netFlow']]\n df2.columns = ['datetime', exchname]\n df = pd.merge(df, df2, how='inner', on='datetime')\n if exchname.split('->')[-1] == zone_key:\n df['imports'] = df['imports']+df[exchname]\n else:\n df['imports'] = df['imports']-df[exchname]\n df['prod'] = df['cons']-df['imports']\n df = df[['datetime', 'prod']]\n # add a row to production for each entry in the dictionary:\n sources = {\n 'JP-HKD':'denkiyoho.hepco.co.jp',\n 'JP-TH':'setsuden.tohoku-epco.co.jp',\n 'JP-TK':'www.tepco.co.jp',\n 'JP-CB':'denki-yoho.chuden.jp',\n 'JP-HR':'www.rikuden.co.jp/denki-yoho',\n 'JP-KN':'www.kepco.co.jp',\n 'JP-SK':'www.energia.co.jp',\n 'JP-CG':'www.yonden.co.jp'\n }\n datalist = []\n for i in range(df.shape[0]):\n data = {\n 'zoneKey': zone_key,\n 'datetime': df.loc[i, 'datetime'].to_pydatetime(),\n 'production': {\n 'biomass': None,\n 'coal': None,\n 'gas': None,\n 'hydro': None,\n 'nuclear': None,\n 'oil': None,\n 'solar': None,\n 'wind': None,\n 'geothermal': None,\n 'unknown': df.loc[i, 'prod']\n },\n 'storage': {},\n 'source': ['occtonet.or.jp', sources[zone_key]]\n }\n datalist.append(data)\n return datalist\n\n\ndef fetch_consumption_df(zone_key='JP-TK', target_datetime=None,\n logger=logging.getLogger(__name__)):\n \"\"\"\n Returns the consumption for an area as a pandas DataFrame\n \"\"\"\n datestamp = arrow.get(target_datetime).to('Asia/Tokyo').strftime('%Y%m%d')\n consumption_url = {\n 'JP-HKD': 'http://denkiyoho.hepco.co.jp/area/data/juyo_01_{}.csv'.format(datestamp),\n 'JP-TH': 'http://setsuden.tohoku-epco.co.jp/common/demand/juyo_02_{}.csv'.format(datestamp),\n 'JP-TK': 'http://www.tepco.co.jp/forecast/html/images/juyo-j.csv',\n 'JP-HR': 
'http://www.rikuden.co.jp/denki-yoho/csv/juyo_05_{}.csv'.format(datestamp),\n 'JP-CB': 'http://denki-yoho.chuden.jp/denki_yoho_content_data/juyo_cepco003.csv',\n 'JP-KN': 'http://www.kepco.co.jp/yamasou/juyo1_kansai.csv',\n 'JP-CG': 'http://www.energia.co.jp/jukyuu/sys/juyo_07_{}.csv'.format(datestamp),\n 'JP-SK': 'http://www.yonden.co.jp/denkiyoho/juyo_shikoku.csv'\n }\n # First roughly 40 rows of the consumption files have hourly data,\n # the parser skips to the rows with 5-min actual values \n if zone_key == 'JP-KN':\n startrow = 44\n else:\n startrow = 42\n df = pd.read_csv(consumption_url[zone_key], skiprows=list(range(startrow)),\n encoding='shift-jis')\n df.columns = ['Date', 'Time', 'cons']\n # Convert \u4e07kW to MW\n df['cons'] = 10*df['cons']\n df = df.dropna()\n df['datetime'] = df.apply(parse_dt, axis=1)\n df = df[['datetime', 'cons']]\n return df\n\n\ndef fetch_price(zone_key='JP-TK', session=None, target_datetime=None,\n logger=logging.getLogger(__name__)):\n if target_datetime is None:\n target_datetime = dt.datetime.now() + dt.timedelta(days=1)\n\n # price files contain data for fiscal year and not calendar year.\n if target_datetime.month <= 3:\n fiscal_year = target_datetime.year - 1\n else:\n fiscal_year = target_datetime.year\n url = 'http://www.jepx.org/market/excel/spot_{}.csv'.format(fiscal_year)\n df = pd.read_csv(url)\n\n df = df.iloc[:, [0, 1, 6, 7, 8, 9, 10, 11, 12, 13, 14]]\n df.columns = ['Date', 'Period', 'JP-HKD', 'JP-TH', 'JP-TK', 'JP-CB',\n 'JP-HR', 'JP-KN', 'JP-CG', 'JP-SK', 'JP-KY']\n\n if zone_key not in df.columns[2:]:\n return []\n\n start = target_datetime - dt.timedelta(days=1)\n df['Date'] = df['Date'].apply(lambda x: dt.datetime.strptime(x, '%Y/%m/%d'))\n df = df[(df['Date'] >= start.date()) & (df['Date'] <= target_datetime.date())]\n\n df['datetime'] = df.apply(lambda row: arrow.get(row['Date']).shift(\n minutes=30 * (row['Period'] - 1)).replace(tzinfo='Asia/Tokyo'), axis=1)\n\n data = list()\n for row in df.iterrows():\n data.append({\n 'zoneKey': zone_key,\n 'currency': 'JPY',\n 'datetime': row[1]['datetime'].datetime,\n 'price': row[1][zone_key],\n 'source': 'jepx.org'\n })\n\n return data\n\n\ndef parse_dt(row):\n \"\"\"\n Parses timestamps from date and time\n \"\"\"\n return arrow.get(' '.join([row['Date'], row['Time']]).replace('/', '-'),\n 'YYYY-M-D H:mm').replace(tzinfo='Asia/Tokyo').datetime\n\n\nif __name__ == '__main__':\n \"\"\"Main method, never used by the Electricity Map backend, but handy for testing.\"\"\"\n\n print('fetch_production() ->')\n print(fetch_production())\n print('fetch_price() ->')\n print(fetch_price())\n", "path": "parsers/JP.py"}]}
| 2,003 | 777 |
gh_patches_debug_43349
|
rasdani/github-patches
|
git_diff
|
deepchecks__deepchecks-1278
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[BUG] Maximum feature cardinality in WholeDatasetDrift is not configurable
**Describe the bug**
When running the WholeDatasetDrift on a dataset with a feature that has cardinality greater then 255, the following error is thrown by the HistGradientBoostingClassifier:
```
/usr/local/lib/python3.9/dist-packages/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py in _check_categories(self, X)
186
187 if categories.size > self.max_bins:
--> 188 raise ValueError(
189 f"Categorical feature at index {f_idx} is "
190 "expected to have a "
ValueError: Categorical feature at index 30 is expected to have a cardinality <= 255
```
There is no way to adjust the max_bins parameter of HistGradientBoostingClassifier. Alternatively, the feature cardinality could be reduced before training the HistGradientBoostingClassifier using the max_num_categories parameter of WholeDatasetDrift
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `deepchecks/core/check_utils/whole_dataset_drift_utils.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module containing common WholeDatasetDriftCheck (domain classifier drift) utils."""
12
13 from typing import List, Optional
14 import warnings
15
16 import numpy as np
17 import pandas as pd
18
19 from sklearn.pipeline import Pipeline
20 from sklearn.compose import ColumnTransformer
21
22 with warnings.catch_warnings():
23 warnings.simplefilter('ignore')
24 from sklearn.experimental import enable_hist_gradient_boosting # noqa # pylint: disable=unused-import
25
26 from sklearn.ensemble import HistGradientBoostingClassifier
27 from sklearn.metrics import roc_auc_score
28 from sklearn.preprocessing import OrdinalEncoder
29 from sklearn.model_selection import train_test_split
30 import plotly.graph_objects as go
31
32 from deepchecks.tabular import Dataset
33 from deepchecks.utils.distribution.plot import feature_distribution_traces, drift_score_bar_traces
34 from deepchecks.utils.features import N_TOP_MESSAGE, calculate_feature_importance_or_none
35 from deepchecks.utils.function import run_available_kwargs
36 from deepchecks.utils.strings import format_percent
37 from deepchecks.utils.typing import Hashable
38
39
40 def run_whole_dataset_drift(train_dataframe: pd.DataFrame, test_dataframe: pd.DataFrame,
41 numerical_features: List[Hashable], cat_features: List[Hashable], sample_size: int,
42 random_state: int, test_size: float, n_top_columns: int, min_feature_importance: float,
43 max_num_categories: Optional[int], min_meaningful_drift_score: float):
44 """Calculate whole dataset drift."""
45 domain_classifier = generate_model(numerical_features, cat_features, random_state)
46
47 train_sample_df = train_dataframe.sample(sample_size, random_state=random_state)
48 test_sample_df = test_dataframe.sample(sample_size, random_state=random_state)
49
50 # create new dataset, with label denoting whether sample belongs to test dataset
51 domain_class_df = pd.concat([train_sample_df, test_sample_df])
52 domain_class_labels = pd.Series([0] * len(train_sample_df) + [1] * len(test_sample_df))
53
54 x_train, x_test, y_train, y_test = train_test_split(domain_class_df, domain_class_labels,
55 stratify=domain_class_labels,
56 random_state=random_state,
57 test_size=test_size)
58
59 domain_classifier = domain_classifier.fit(x_train, y_train)
60
61 y_test.name = 'belongs_to_test'
62 domain_test_dataset = Dataset(pd.concat([x_test.reset_index(drop=True), y_test.reset_index(drop=True)], axis=1),
63 cat_features=cat_features, label='belongs_to_test')
64
65 # calculate feature importance of domain_classifier, containing the information which features separate
66 # the dataset best.
67 fi, importance_type = calculate_feature_importance_or_none(
68 domain_classifier,
69 domain_test_dataset,
70 force_permutation=True,
71 permutation_kwargs={'n_repeats': 10, 'random_state': random_state, 'timeout': 120}
72 )
73
74 fi = fi.sort_values(ascending=False) if fi is not None else None
75
76 domain_classifier_auc = roc_auc_score(y_test, domain_classifier.predict_proba(x_test)[:, 1])
77 drift_score = auc_to_drift_score(domain_classifier_auc)
78
79 values_dict = {
80 'domain_classifier_auc': domain_classifier_auc,
81 'domain_classifier_drift_score': drift_score,
82 'domain_classifier_feature_importance': fi.to_dict() if fi is not None else {},
83 }
84
85 feature_importance_note = f"""
86 <span>
87 The percents of explained dataset difference are the importance values for the feature calculated
88 using `{importance_type}`.
89 </span><br><br>
90 """
91
92 if fi is not None and drift_score > min_meaningful_drift_score:
93 top_fi = fi.head(n_top_columns)
94 top_fi = top_fi.loc[top_fi > min_feature_importance]
95 else:
96 top_fi = None
97
98 if top_fi is not None and len(top_fi):
99 score = values_dict['domain_classifier_drift_score']
100
101 displays = [feature_importance_note, build_drift_plot(score),
102 '<h3>Main features contributing to drift</h3>',
103 N_TOP_MESSAGE % n_top_columns]
104 displays += [display_dist(train_sample_df[feature], test_sample_df[feature], top_fi, cat_features,
105 max_num_categories)
106 for feature in top_fi.index]
107 else:
108 displays = None
109
110 return values_dict, displays
111
112
113 def generate_model(numerical_columns: List[Hashable], categorical_columns: List[Hashable],
114 random_state: int = 42) -> Pipeline:
115 """Generate the unfitted Domain Classifier model."""
116 categorical_transformer = Pipeline(
117 steps=[('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value',
118 unknown_value=np.nan,
119 dtype=np.float64))]
120 )
121
122 preprocessor = ColumnTransformer(
123 transformers=[
124 ('num', 'passthrough', numerical_columns),
125 ('cat', categorical_transformer, categorical_columns),
126 ]
127 )
128
129 return Pipeline(
130 steps=[('preprocessing', preprocessor),
131 ('model', HistGradientBoostingClassifier(
132 max_depth=2, max_iter=10, random_state=random_state,
133 categorical_features=[False] * len(numerical_columns) + [True] * len(categorical_columns)
134 ))])
135
136
137 def auc_to_drift_score(auc: float) -> float:
138 """Calculate the drift score, which is 2*auc - 1, with auc being the auc of the Domain Classifier.
139
140 Parameters
141 ----------
142 auc : float
143 auc of the Domain Classifier
144 """
145 return max(2 * auc - 1, 0)
146
147
148 def build_drift_plot(score):
149 """Build traffic light drift plot."""
150 bar_traces, x_axis, y_axis = drift_score_bar_traces(score)
151 x_axis['title'] = 'Drift score'
152 drift_plot = go.Figure(layout=dict(
153 title='Drift Score - Whole Dataset Total',
154 xaxis=x_axis,
155 yaxis=y_axis,
156 width=700,
157 height=200
158
159 ))
160
161 drift_plot.add_traces(bar_traces)
162 return drift_plot
163
164
165 def display_dist(train_column: pd.Series, test_column: pd.Series, fi_ser: pd.Series, cat_features,
166 max_num_categories: int = 10):
167 """Display a distribution comparison plot for the given columns."""
168 column_name = train_column.name
169
170 title = f'Feature: {column_name} - Explains {format_percent(fi_ser.loc[column_name])} of dataset difference'
171 traces, xaxis_layout, yaxis_layout = \
172 feature_distribution_traces(train_column.dropna(),
173 test_column.dropna(),
174 column_name,
175 is_categorical=column_name in cat_features,
176 max_num_categories=max_num_categories)
177
178 figure = go.Figure(layout=go.Layout(
179 title=title,
180 xaxis=xaxis_layout,
181 yaxis=yaxis_layout,
182 legend=dict(
183 title='Dataset',
184 yanchor='top',
185 y=0.9,
186 xanchor='left'),
187 width=700,
188 height=300
189 ))
190
191 figure.add_traces(traces)
192
193 return figure
194
```
Path: `deepchecks/utils/distribution/rare_category_encoder.py`
Content:
```
1 # ----------------------------------------------------------------------------
2 # Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)
3 #
4 # This file is part of Deepchecks.
5 # Deepchecks is distributed under the terms of the GNU Affero General
6 # Public License (version 3 or later).
7 # You should have received a copy of the GNU Affero General Public License
8 # along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.
9 # ----------------------------------------------------------------------------
10 #
11 """Module of RareCategoryEncoder."""
12 from typing import List, Optional
13 from collections import defaultdict
14
15 import pandas as pd
16
17 from deepchecks.utils.typing import Hashable
18
19
20 __all__ = ['RareCategoryEncoder']
21
22
23 class RareCategoryEncoder:
24 """Encodes rare categories into an "other" parameter.
25
26 Note that this encoder assumes data is received as a DataFrame.
27
28 Parameters
29 ----------
30 max_num_categories : int , default: 10
31 Indicates the maximum number of unique categories in a single categorical column
32 (rare categories will be changed to a form of "other")
33 cols : Optional[List[Hashable]] , default: None
34 Columns to limit the encoder to work on. If non are given will work on all columns given in `fit`
35 """
36
37 DEFAULT_OTHER_VALUE = 'OTHER_RARE_CATEGORY'
38
39 def __init__(
40 self,
41 max_num_categories: int = 10,
42 cols: Optional[List[Hashable]] = None
43 ):
44 self.max_num_categories = max_num_categories
45 self.cols = cols
46 self._col_mapping = None
47
48 def fit(self, data: pd.DataFrame):
49 """Fit the encoder using given dataframe.
50
51 Parameters
52 ----------
53 data : pd.DataFrame
54 data to fit from
55 """
56 if self.cols is not None:
57 self._col_mapping = data[self.cols].apply(self._fit_for_series, axis=0)
58 else:
59 self._col_mapping = data.apply(self._fit_for_series, axis=0)
60
61 def transform(self, data: pd.DataFrame):
62 """Transform given data according to columns processed in `fit`.
63
64 Parameters
65 ----------
66 data : pd.DataFrame
67 data to transform
68 Returns
69 -------
70 DataFrame
71 transformed data
72 """
73 if self._col_mapping is None:
74 raise RuntimeError('Cannot transform without fitting first')
75
76 if self.cols is not None:
77 data = data.copy()
78 data[self.cols] = data[self.cols].apply(lambda s: s.map(self._col_mapping[s.name]))
79 else:
80 data = data.apply(lambda s: s.map(self._col_mapping[s.name]))
81 return data
82
83 def fit_transform(self, data: pd.DataFrame):
84 """Run `fit` and `transform` on given data.
85
86 Parameters
87 ----------
88 data : pd.DataFrame
89 data to fit on and transform
90 Returns
91 -------
92 DataFrame
93 transformed data
94 """
95 self.fit(data)
96 return self.transform(data)
97
98 def _fit_for_series(self, series: pd.Series):
99 top_values = list(series.value_counts().head(self.max_num_categories).index)
100 other_value = self._get_unique_other_value(series)
101 mapper = pd.Series(defaultdict(lambda: other_value, {k: k for k in top_values}), name=series.name)
102 return mapper
103
104 def _get_unique_other_value(self, series: pd.Series):
105 unique_values = list(series.unique())
106 other = self.DEFAULT_OTHER_VALUE
107 i = 0
108 while other in unique_values:
109 other = self.DEFAULT_OTHER_VALUE + str(i)
110 i += 1
111 return other
112
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/deepchecks/core/check_utils/whole_dataset_drift_utils.py b/deepchecks/core/check_utils/whole_dataset_drift_utils.py
--- a/deepchecks/core/check_utils/whole_dataset_drift_utils.py
+++ b/deepchecks/core/check_utils/whole_dataset_drift_utils.py
@@ -31,6 +31,7 @@
from deepchecks.tabular import Dataset
from deepchecks.utils.distribution.plot import feature_distribution_traces, drift_score_bar_traces
+from deepchecks.utils.distribution.rare_category_encoder import RareCategoryEncoder
from deepchecks.utils.features import N_TOP_MESSAGE, calculate_feature_importance_or_none
from deepchecks.utils.function import run_available_kwargs
from deepchecks.utils.strings import format_percent
@@ -114,7 +115,8 @@
random_state: int = 42) -> Pipeline:
"""Generate the unfitted Domain Classifier model."""
categorical_transformer = Pipeline(
- steps=[('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value',
+ steps=[('rare', RareCategoryEncoder(254)),
+ ('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value',
unknown_value=np.nan,
dtype=np.float64))]
)
diff --git a/deepchecks/utils/distribution/rare_category_encoder.py b/deepchecks/utils/distribution/rare_category_encoder.py
--- a/deepchecks/utils/distribution/rare_category_encoder.py
+++ b/deepchecks/utils/distribution/rare_category_encoder.py
@@ -45,18 +45,24 @@
self.cols = cols
self._col_mapping = None
- def fit(self, data: pd.DataFrame):
+ def fit(self, data: pd.DataFrame, y=None): # noqa # pylint: disable=unused-argument
"""Fit the encoder using given dataframe.
Parameters
----------
data : pd.DataFrame
data to fit from
+ y :
+ Unused, but needed for sklearn pipeline
"""
+ self._col_mapping = {}
+
if self.cols is not None:
- self._col_mapping = data[self.cols].apply(self._fit_for_series, axis=0)
+ for col in self.cols:
+ self._col_mapping[col] = self._fit_for_series(data[col])
else:
- self._col_mapping = data.apply(self._fit_for_series, axis=0)
+ for col in data.columns:
+ self._col_mapping[col] = self._fit_for_series(data[col])
def transform(self, data: pd.DataFrame):
"""Transform given data according to columns processed in `fit`.
@@ -78,15 +84,18 @@
data[self.cols] = data[self.cols].apply(lambda s: s.map(self._col_mapping[s.name]))
else:
data = data.apply(lambda s: s.map(self._col_mapping[s.name]))
+
return data
- def fit_transform(self, data: pd.DataFrame):
+ def fit_transform(self, data: pd.DataFrame, y=None): # noqa # pylint: disable=unused-argument
"""Run `fit` and `transform` on given data.
Parameters
----------
data : pd.DataFrame
data to fit on and transform
+ y :
+ Unused, but needed for sklearn pipeline
Returns
-------
DataFrame
@@ -98,7 +107,7 @@
def _fit_for_series(self, series: pd.Series):
top_values = list(series.value_counts().head(self.max_num_categories).index)
other_value = self._get_unique_other_value(series)
- mapper = pd.Series(defaultdict(lambda: other_value, {k: k for k in top_values}), name=series.name)
+ mapper = defaultdict(lambda: other_value, {k: k for k in top_values})
return mapper
def _get_unique_other_value(self, series: pd.Series):
|
{"golden_diff": "diff --git a/deepchecks/core/check_utils/whole_dataset_drift_utils.py b/deepchecks/core/check_utils/whole_dataset_drift_utils.py\n--- a/deepchecks/core/check_utils/whole_dataset_drift_utils.py\n+++ b/deepchecks/core/check_utils/whole_dataset_drift_utils.py\n@@ -31,6 +31,7 @@\n \n from deepchecks.tabular import Dataset\n from deepchecks.utils.distribution.plot import feature_distribution_traces, drift_score_bar_traces\n+from deepchecks.utils.distribution.rare_category_encoder import RareCategoryEncoder\n from deepchecks.utils.features import N_TOP_MESSAGE, calculate_feature_importance_or_none\n from deepchecks.utils.function import run_available_kwargs\n from deepchecks.utils.strings import format_percent\n@@ -114,7 +115,8 @@\n random_state: int = 42) -> Pipeline:\n \"\"\"Generate the unfitted Domain Classifier model.\"\"\"\n categorical_transformer = Pipeline(\n- steps=[('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value',\n+ steps=[('rare', RareCategoryEncoder(254)),\n+ ('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value',\n unknown_value=np.nan,\n dtype=np.float64))]\n )\ndiff --git a/deepchecks/utils/distribution/rare_category_encoder.py b/deepchecks/utils/distribution/rare_category_encoder.py\n--- a/deepchecks/utils/distribution/rare_category_encoder.py\n+++ b/deepchecks/utils/distribution/rare_category_encoder.py\n@@ -45,18 +45,24 @@\n self.cols = cols\n self._col_mapping = None\n \n- def fit(self, data: pd.DataFrame):\n+ def fit(self, data: pd.DataFrame, y=None): # noqa # pylint: disable=unused-argument\n \"\"\"Fit the encoder using given dataframe.\n \n Parameters\n ----------\n data : pd.DataFrame\n data to fit from\n+ y :\n+ Unused, but needed for sklearn pipeline\n \"\"\"\n+ self._col_mapping = {}\n+\n if self.cols is not None:\n- self._col_mapping = data[self.cols].apply(self._fit_for_series, axis=0)\n+ for col in self.cols:\n+ self._col_mapping[col] = self._fit_for_series(data[col])\n else:\n- self._col_mapping = data.apply(self._fit_for_series, axis=0)\n+ for col in data.columns:\n+ self._col_mapping[col] = self._fit_for_series(data[col])\n \n def transform(self, data: pd.DataFrame):\n \"\"\"Transform given data according to columns processed in `fit`.\n@@ -78,15 +84,18 @@\n data[self.cols] = data[self.cols].apply(lambda s: s.map(self._col_mapping[s.name]))\n else:\n data = data.apply(lambda s: s.map(self._col_mapping[s.name]))\n+\n return data\n \n- def fit_transform(self, data: pd.DataFrame):\n+ def fit_transform(self, data: pd.DataFrame, y=None): # noqa # pylint: disable=unused-argument\n \"\"\"Run `fit` and `transform` on given data.\n \n Parameters\n ----------\n data : pd.DataFrame\n data to fit on and transform\n+ y :\n+ Unused, but needed for sklearn pipeline\n Returns\n -------\n DataFrame\n@@ -98,7 +107,7 @@\n def _fit_for_series(self, series: pd.Series):\n top_values = list(series.value_counts().head(self.max_num_categories).index)\n other_value = self._get_unique_other_value(series)\n- mapper = pd.Series(defaultdict(lambda: other_value, {k: k for k in top_values}), name=series.name)\n+ mapper = defaultdict(lambda: other_value, {k: k for k in top_values})\n return mapper\n \n def _get_unique_other_value(self, series: pd.Series):\n", "issue": "[BUG] Maximum feature cardinality in WholeDatasetDrift is not configurable\n**Describe the bug**\r\nWhen running the WholeDatasetDrift on a dataset with a feature that has cardinality greater then 255, the following error is thrown by the 
HistGradientBoostingClassifier:\r\n\r\n```\r\n/usr/local/lib/python3.9/dist-packages/sklearn/ensemble/_hist_gradient_boosting/gradient_boosting.py in _check_categories(self, X)\r\n 186 \r\n 187 if categories.size > self.max_bins:\r\n--> 188 raise ValueError(\r\n 189 f\"Categorical feature at index {f_idx} is \"\r\n 190 \"expected to have a \"\r\n\r\nValueError: Categorical feature at index 30 is expected to have a cardinality <= 255\r\n```\r\n\r\nThere is no way to adjust the max_bins parameter of HistGradientBoostingClassifier. Alternatively, the feature cardinality could be reduced before training the HistGradientBoostingClassifier using the max_num_categories parameter of WholeDatasetDrift\n", "before_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing common WholeDatasetDriftCheck (domain classifier drift) utils.\"\"\"\n\nfrom typing import List, Optional\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\n\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore')\n from sklearn.experimental import enable_hist_gradient_boosting # noqa # pylint: disable=unused-import\n\nfrom sklearn.ensemble import HistGradientBoostingClassifier\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.model_selection import train_test_split\nimport plotly.graph_objects as go\n\nfrom deepchecks.tabular import Dataset\nfrom deepchecks.utils.distribution.plot import feature_distribution_traces, drift_score_bar_traces\nfrom deepchecks.utils.features import N_TOP_MESSAGE, calculate_feature_importance_or_none\nfrom deepchecks.utils.function import run_available_kwargs\nfrom deepchecks.utils.strings import format_percent\nfrom deepchecks.utils.typing import Hashable\n\n\ndef run_whole_dataset_drift(train_dataframe: pd.DataFrame, test_dataframe: pd.DataFrame,\n numerical_features: List[Hashable], cat_features: List[Hashable], sample_size: int,\n random_state: int, test_size: float, n_top_columns: int, min_feature_importance: float,\n max_num_categories: Optional[int], min_meaningful_drift_score: float):\n \"\"\"Calculate whole dataset drift.\"\"\"\n domain_classifier = generate_model(numerical_features, cat_features, random_state)\n\n train_sample_df = train_dataframe.sample(sample_size, random_state=random_state)\n test_sample_df = test_dataframe.sample(sample_size, random_state=random_state)\n\n # create new dataset, with label denoting whether sample belongs to test dataset\n domain_class_df = pd.concat([train_sample_df, test_sample_df])\n domain_class_labels = pd.Series([0] * len(train_sample_df) + [1] * len(test_sample_df))\n\n x_train, x_test, y_train, y_test = train_test_split(domain_class_df, domain_class_labels,\n stratify=domain_class_labels,\n random_state=random_state,\n test_size=test_size)\n\n domain_classifier = domain_classifier.fit(x_train, y_train)\n\n y_test.name = 'belongs_to_test'\n domain_test_dataset = 
Dataset(pd.concat([x_test.reset_index(drop=True), y_test.reset_index(drop=True)], axis=1),\n cat_features=cat_features, label='belongs_to_test')\n\n # calculate feature importance of domain_classifier, containing the information which features separate\n # the dataset best.\n fi, importance_type = calculate_feature_importance_or_none(\n domain_classifier,\n domain_test_dataset,\n force_permutation=True,\n permutation_kwargs={'n_repeats': 10, 'random_state': random_state, 'timeout': 120}\n )\n\n fi = fi.sort_values(ascending=False) if fi is not None else None\n\n domain_classifier_auc = roc_auc_score(y_test, domain_classifier.predict_proba(x_test)[:, 1])\n drift_score = auc_to_drift_score(domain_classifier_auc)\n\n values_dict = {\n 'domain_classifier_auc': domain_classifier_auc,\n 'domain_classifier_drift_score': drift_score,\n 'domain_classifier_feature_importance': fi.to_dict() if fi is not None else {},\n }\n\n feature_importance_note = f\"\"\"\n <span>\n The percents of explained dataset difference are the importance values for the feature calculated\n using `{importance_type}`.\n </span><br><br>\n \"\"\"\n\n if fi is not None and drift_score > min_meaningful_drift_score:\n top_fi = fi.head(n_top_columns)\n top_fi = top_fi.loc[top_fi > min_feature_importance]\n else:\n top_fi = None\n\n if top_fi is not None and len(top_fi):\n score = values_dict['domain_classifier_drift_score']\n\n displays = [feature_importance_note, build_drift_plot(score),\n '<h3>Main features contributing to drift</h3>',\n N_TOP_MESSAGE % n_top_columns]\n displays += [display_dist(train_sample_df[feature], test_sample_df[feature], top_fi, cat_features,\n max_num_categories)\n for feature in top_fi.index]\n else:\n displays = None\n\n return values_dict, displays\n\n\ndef generate_model(numerical_columns: List[Hashable], categorical_columns: List[Hashable],\n random_state: int = 42) -> Pipeline:\n \"\"\"Generate the unfitted Domain Classifier model.\"\"\"\n categorical_transformer = Pipeline(\n steps=[('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value',\n unknown_value=np.nan,\n dtype=np.float64))]\n )\n\n preprocessor = ColumnTransformer(\n transformers=[\n ('num', 'passthrough', numerical_columns),\n ('cat', categorical_transformer, categorical_columns),\n ]\n )\n\n return Pipeline(\n steps=[('preprocessing', preprocessor),\n ('model', HistGradientBoostingClassifier(\n max_depth=2, max_iter=10, random_state=random_state,\n categorical_features=[False] * len(numerical_columns) + [True] * len(categorical_columns)\n ))])\n\n\ndef auc_to_drift_score(auc: float) -> float:\n \"\"\"Calculate the drift score, which is 2*auc - 1, with auc being the auc of the Domain Classifier.\n\n Parameters\n ----------\n auc : float\n auc of the Domain Classifier\n \"\"\"\n return max(2 * auc - 1, 0)\n\n\ndef build_drift_plot(score):\n \"\"\"Build traffic light drift plot.\"\"\"\n bar_traces, x_axis, y_axis = drift_score_bar_traces(score)\n x_axis['title'] = 'Drift score'\n drift_plot = go.Figure(layout=dict(\n title='Drift Score - Whole Dataset Total',\n xaxis=x_axis,\n yaxis=y_axis,\n width=700,\n height=200\n\n ))\n\n drift_plot.add_traces(bar_traces)\n return drift_plot\n\n\ndef display_dist(train_column: pd.Series, test_column: pd.Series, fi_ser: pd.Series, cat_features,\n max_num_categories: int = 10):\n \"\"\"Display a distribution comparison plot for the given columns.\"\"\"\n column_name = train_column.name\n\n title = f'Feature: {column_name} - Explains {format_percent(fi_ser.loc[column_name])} 
of dataset difference'\n traces, xaxis_layout, yaxis_layout = \\\n feature_distribution_traces(train_column.dropna(),\n test_column.dropna(),\n column_name,\n is_categorical=column_name in cat_features,\n max_num_categories=max_num_categories)\n\n figure = go.Figure(layout=go.Layout(\n title=title,\n xaxis=xaxis_layout,\n yaxis=yaxis_layout,\n legend=dict(\n title='Dataset',\n yanchor='top',\n y=0.9,\n xanchor='left'),\n width=700,\n height=300\n ))\n\n figure.add_traces(traces)\n\n return figure\n", "path": "deepchecks/core/check_utils/whole_dataset_drift_utils.py"}, {"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module of RareCategoryEncoder.\"\"\"\nfrom typing import List, Optional\nfrom collections import defaultdict\n\nimport pandas as pd\n\nfrom deepchecks.utils.typing import Hashable\n\n\n__all__ = ['RareCategoryEncoder']\n\n\nclass RareCategoryEncoder:\n \"\"\"Encodes rare categories into an \"other\" parameter.\n\n Note that this encoder assumes data is received as a DataFrame.\n\n Parameters\n ----------\n max_num_categories : int , default: 10\n Indicates the maximum number of unique categories in a single categorical column\n (rare categories will be changed to a form of \"other\")\n cols : Optional[List[Hashable]] , default: None\n Columns to limit the encoder to work on. 
If non are given will work on all columns given in `fit`\n \"\"\"\n\n DEFAULT_OTHER_VALUE = 'OTHER_RARE_CATEGORY'\n\n def __init__(\n self,\n max_num_categories: int = 10,\n cols: Optional[List[Hashable]] = None\n ):\n self.max_num_categories = max_num_categories\n self.cols = cols\n self._col_mapping = None\n\n def fit(self, data: pd.DataFrame):\n \"\"\"Fit the encoder using given dataframe.\n\n Parameters\n ----------\n data : pd.DataFrame\n data to fit from\n \"\"\"\n if self.cols is not None:\n self._col_mapping = data[self.cols].apply(self._fit_for_series, axis=0)\n else:\n self._col_mapping = data.apply(self._fit_for_series, axis=0)\n\n def transform(self, data: pd.DataFrame):\n \"\"\"Transform given data according to columns processed in `fit`.\n\n Parameters\n ----------\n data : pd.DataFrame\n data to transform\n Returns\n -------\n DataFrame\n transformed data\n \"\"\"\n if self._col_mapping is None:\n raise RuntimeError('Cannot transform without fitting first')\n\n if self.cols is not None:\n data = data.copy()\n data[self.cols] = data[self.cols].apply(lambda s: s.map(self._col_mapping[s.name]))\n else:\n data = data.apply(lambda s: s.map(self._col_mapping[s.name]))\n return data\n\n def fit_transform(self, data: pd.DataFrame):\n \"\"\"Run `fit` and `transform` on given data.\n\n Parameters\n ----------\n data : pd.DataFrame\n data to fit on and transform\n Returns\n -------\n DataFrame\n transformed data\n \"\"\"\n self.fit(data)\n return self.transform(data)\n\n def _fit_for_series(self, series: pd.Series):\n top_values = list(series.value_counts().head(self.max_num_categories).index)\n other_value = self._get_unique_other_value(series)\n mapper = pd.Series(defaultdict(lambda: other_value, {k: k for k in top_values}), name=series.name)\n return mapper\n\n def _get_unique_other_value(self, series: pd.Series):\n unique_values = list(series.unique())\n other = self.DEFAULT_OTHER_VALUE\n i = 0\n while other in unique_values:\n other = self.DEFAULT_OTHER_VALUE + str(i)\n i += 1\n return other\n", "path": "deepchecks/utils/distribution/rare_category_encoder.py"}], "after_files": [{"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module containing common WholeDatasetDriftCheck (domain classifier drift) utils.\"\"\"\n\nfrom typing import List, Optional\nimport warnings\n\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.compose import ColumnTransformer\n\nwith warnings.catch_warnings():\n warnings.simplefilter('ignore')\n from sklearn.experimental import enable_hist_gradient_boosting # noqa # pylint: disable=unused-import\n\nfrom sklearn.ensemble import HistGradientBoostingClassifier\nfrom sklearn.metrics import roc_auc_score\nfrom sklearn.preprocessing import OrdinalEncoder\nfrom sklearn.model_selection import train_test_split\nimport plotly.graph_objects as go\n\nfrom deepchecks.tabular import Dataset\nfrom deepchecks.utils.distribution.plot import feature_distribution_traces, drift_score_bar_traces\nfrom deepchecks.utils.distribution.rare_category_encoder import RareCategoryEncoder\nfrom deepchecks.utils.features import N_TOP_MESSAGE, calculate_feature_importance_or_none\nfrom deepchecks.utils.function import run_available_kwargs\nfrom deepchecks.utils.strings import format_percent\nfrom deepchecks.utils.typing import Hashable\n\n\ndef run_whole_dataset_drift(train_dataframe: pd.DataFrame, test_dataframe: pd.DataFrame,\n numerical_features: List[Hashable], cat_features: List[Hashable], sample_size: int,\n random_state: int, test_size: float, n_top_columns: int, min_feature_importance: float,\n max_num_categories: Optional[int], min_meaningful_drift_score: float):\n \"\"\"Calculate whole dataset drift.\"\"\"\n domain_classifier = generate_model(numerical_features, cat_features, random_state)\n\n train_sample_df = train_dataframe.sample(sample_size, random_state=random_state)\n test_sample_df = test_dataframe.sample(sample_size, random_state=random_state)\n\n # create new dataset, with label denoting whether sample belongs to test dataset\n domain_class_df = pd.concat([train_sample_df, test_sample_df])\n domain_class_labels = pd.Series([0] * len(train_sample_df) + [1] * len(test_sample_df))\n\n x_train, x_test, y_train, y_test = train_test_split(domain_class_df, domain_class_labels,\n stratify=domain_class_labels,\n random_state=random_state,\n test_size=test_size)\n\n domain_classifier = domain_classifier.fit(x_train, y_train)\n\n y_test.name = 'belongs_to_test'\n domain_test_dataset = Dataset(pd.concat([x_test.reset_index(drop=True), y_test.reset_index(drop=True)], axis=1),\n cat_features=cat_features, label='belongs_to_test')\n\n # calculate feature importance of domain_classifier, containing the information which features separate\n # the dataset best.\n fi, importance_type = calculate_feature_importance_or_none(\n domain_classifier,\n domain_test_dataset,\n force_permutation=True,\n permutation_kwargs={'n_repeats': 10, 'random_state': random_state, 'timeout': 120}\n )\n\n fi = fi.sort_values(ascending=False) if fi is not None else None\n\n domain_classifier_auc = roc_auc_score(y_test, domain_classifier.predict_proba(x_test)[:, 1])\n drift_score = auc_to_drift_score(domain_classifier_auc)\n\n values_dict = {\n 'domain_classifier_auc': domain_classifier_auc,\n 'domain_classifier_drift_score': drift_score,\n 'domain_classifier_feature_importance': fi.to_dict() if fi is not None else {},\n }\n\n feature_importance_note = f\"\"\"\n <span>\n The percents of explained dataset difference are the importance values for the feature calculated\n 
using `{importance_type}`.\n </span><br><br>\n \"\"\"\n\n if fi is not None and drift_score > min_meaningful_drift_score:\n top_fi = fi.head(n_top_columns)\n top_fi = top_fi.loc[top_fi > min_feature_importance]\n else:\n top_fi = None\n\n if top_fi is not None and len(top_fi):\n score = values_dict['domain_classifier_drift_score']\n\n displays = [feature_importance_note, build_drift_plot(score),\n '<h3>Main features contributing to drift</h3>',\n N_TOP_MESSAGE % n_top_columns]\n displays += [display_dist(train_sample_df[feature], test_sample_df[feature], top_fi, cat_features,\n max_num_categories)\n for feature in top_fi.index]\n else:\n displays = None\n\n return values_dict, displays\n\n\ndef generate_model(numerical_columns: List[Hashable], categorical_columns: List[Hashable],\n random_state: int = 42) -> Pipeline:\n \"\"\"Generate the unfitted Domain Classifier model.\"\"\"\n categorical_transformer = Pipeline(\n steps=[('rare', RareCategoryEncoder(254)),\n ('encoder', run_available_kwargs(OrdinalEncoder, handle_unknown='use_encoded_value',\n unknown_value=np.nan,\n dtype=np.float64))]\n )\n\n preprocessor = ColumnTransformer(\n transformers=[\n ('num', 'passthrough', numerical_columns),\n ('cat', categorical_transformer, categorical_columns),\n ]\n )\n\n return Pipeline(\n steps=[('preprocessing', preprocessor),\n ('model', HistGradientBoostingClassifier(\n max_depth=2, max_iter=10, random_state=random_state,\n categorical_features=[False] * len(numerical_columns) + [True] * len(categorical_columns)\n ))])\n\n\ndef auc_to_drift_score(auc: float) -> float:\n \"\"\"Calculate the drift score, which is 2*auc - 1, with auc being the auc of the Domain Classifier.\n\n Parameters\n ----------\n auc : float\n auc of the Domain Classifier\n \"\"\"\n return max(2 * auc - 1, 0)\n\n\ndef build_drift_plot(score):\n \"\"\"Build traffic light drift plot.\"\"\"\n bar_traces, x_axis, y_axis = drift_score_bar_traces(score)\n x_axis['title'] = 'Drift score'\n drift_plot = go.Figure(layout=dict(\n title='Drift Score - Whole Dataset Total',\n xaxis=x_axis,\n yaxis=y_axis,\n width=700,\n height=200\n\n ))\n\n drift_plot.add_traces(bar_traces)\n return drift_plot\n\n\ndef display_dist(train_column: pd.Series, test_column: pd.Series, fi_ser: pd.Series, cat_features,\n max_num_categories: int = 10):\n \"\"\"Display a distribution comparison plot for the given columns.\"\"\"\n column_name = train_column.name\n\n title = f'Feature: {column_name} - Explains {format_percent(fi_ser.loc[column_name])} of dataset difference'\n traces, xaxis_layout, yaxis_layout = \\\n feature_distribution_traces(train_column.dropna(),\n test_column.dropna(),\n column_name,\n is_categorical=column_name in cat_features,\n max_num_categories=max_num_categories)\n\n figure = go.Figure(layout=go.Layout(\n title=title,\n xaxis=xaxis_layout,\n yaxis=yaxis_layout,\n legend=dict(\n title='Dataset',\n yanchor='top',\n y=0.9,\n xanchor='left'),\n width=700,\n height=300\n ))\n\n figure.add_traces(traces)\n\n return figure\n", "path": "deepchecks/core/check_utils/whole_dataset_drift_utils.py"}, {"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021-2022 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. 
If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"Module of RareCategoryEncoder.\"\"\"\nfrom typing import List, Optional\nfrom collections import defaultdict\n\nimport pandas as pd\n\nfrom deepchecks.utils.typing import Hashable\n\n\n__all__ = ['RareCategoryEncoder']\n\n\nclass RareCategoryEncoder:\n \"\"\"Encodes rare categories into an \"other\" parameter.\n\n Note that this encoder assumes data is received as a DataFrame.\n\n Parameters\n ----------\n max_num_categories : int , default: 10\n Indicates the maximum number of unique categories in a single categorical column\n (rare categories will be changed to a form of \"other\")\n cols : Optional[List[Hashable]] , default: None\n Columns to limit the encoder to work on. If non are given will work on all columns given in `fit`\n \"\"\"\n\n DEFAULT_OTHER_VALUE = 'OTHER_RARE_CATEGORY'\n\n def __init__(\n self,\n max_num_categories: int = 10,\n cols: Optional[List[Hashable]] = None\n ):\n self.max_num_categories = max_num_categories\n self.cols = cols\n self._col_mapping = None\n\n def fit(self, data: pd.DataFrame, y=None): # noqa # pylint: disable=unused-argument\n \"\"\"Fit the encoder using given dataframe.\n\n Parameters\n ----------\n data : pd.DataFrame\n data to fit from\n y :\n Unused, but needed for sklearn pipeline\n \"\"\"\n self._col_mapping = {}\n\n if self.cols is not None:\n for col in self.cols:\n self._col_mapping[col] = self._fit_for_series(data[col])\n else:\n for col in data.columns:\n self._col_mapping[col] = self._fit_for_series(data[col])\n\n def transform(self, data: pd.DataFrame):\n \"\"\"Transform given data according to columns processed in `fit`.\n\n Parameters\n ----------\n data : pd.DataFrame\n data to transform\n Returns\n -------\n DataFrame\n transformed data\n \"\"\"\n if self._col_mapping is None:\n raise RuntimeError('Cannot transform without fitting first')\n\n if self.cols is not None:\n data = data.copy()\n data[self.cols] = data[self.cols].apply(lambda s: s.map(self._col_mapping[s.name]))\n else:\n data = data.apply(lambda s: s.map(self._col_mapping[s.name]))\n\n return data\n\n def fit_transform(self, data: pd.DataFrame, y=None): # noqa # pylint: disable=unused-argument\n \"\"\"Run `fit` and `transform` on given data.\n\n Parameters\n ----------\n data : pd.DataFrame\n data to fit on and transform\n y :\n Unused, but needed for sklearn pipeline\n Returns\n -------\n DataFrame\n transformed data\n \"\"\"\n self.fit(data)\n return self.transform(data)\n\n def _fit_for_series(self, series: pd.Series):\n top_values = list(series.value_counts().head(self.max_num_categories).index)\n other_value = self._get_unique_other_value(series)\n mapper = defaultdict(lambda: other_value, {k: k for k in top_values})\n return mapper\n\n def _get_unique_other_value(self, series: pd.Series):\n unique_values = list(series.unique())\n other = self.DEFAULT_OTHER_VALUE\n i = 0\n while other in unique_values:\n other = self.DEFAULT_OTHER_VALUE + str(i)\n i += 1\n return other\n", "path": "deepchecks/utils/distribution/rare_category_encoder.py"}]}
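The drift utilities above turn the domain classifier's AUC into a drift score via `max(2 * auc - 1, 0)`. Below is a minimal sketch of that mapping, using only the `auc_to_drift_score` definition shown in the content above; the sample AUC values are illustrative.

```python
# Illustration of the AUC -> drift-score mapping used by the whole-dataset
# drift check: an indistinguishable train/test split (AUC ~ 0.5) scores 0,
# while a perfectly separable one (AUC = 1.0) scores 1.
def auc_to_drift_score(auc: float) -> float:
    return max(2 * auc - 1, 0)

for auc in (0.5, 0.75, 0.9, 1.0):
    print(f"AUC={auc:.2f} -> drift score={auc_to_drift_score(auc):.2f}")
# AUC=0.50 -> 0.00, AUC=0.75 -> 0.50, AUC=0.90 -> 0.80, AUC=1.00 -> 1.00
```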
| 3,617 | 848 |
gh_patches_debug_22042
|
rasdani/github-patches
|
git_diff
|
MycroftAI__mycroft-core-2706
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Idea: Enhance Amazon Polly support
Amazon Polly works well using standard voices; I have it running perfectly under the latest Picroft image. However, there is currently no support for 'neural' engine voices, nor for 'conversational' style SSML. Both provide exceptionally high-quality text-to-speech audio, and it would be nice to be able to use them with Mycroft.
This [post](https://community.mycroft.ai/t/regarding-polly-tts-support/8722/10) on the community forums goes into a little more detail.
Thanks!
--- END ISSUE ---
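For context, the knob the issue asks for already exists in the Polly API itself: boto3's `synthesize_speech` accepts an `Engine` argument (`'standard'` or `'neural'`). The sketch below is standalone and not Mycroft code; the voice, region and output path are illustrative, and credentials are assumed to come from the ambient AWS configuration.

```python
# Standalone boto3 sketch showing the Engine argument the plugin would need
# to expose; "Matthew" is one of the voices that supports the neural engine.
import boto3

polly = boto3.Session(region_name="us-east-1").client("polly")
response = polly.synthesize_speech(
    OutputFormat="mp3",
    Text="Hello from a neural voice.",
    TextType="text",
    VoiceId="Matthew",
    Engine="neural",   # omitting Engine falls back to the standard engine
)
with open("hello.mp3", "wb") as f:
    f.write(response["AudioStream"].read())
```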
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mycroft/tts/polly_tts.py`
Content:
```
1 # Copyright 2017 Mycroft AI Inc.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 #
15 from mycroft.tts.tts import TTS, TTSValidator
16 from mycroft.configuration import Configuration
17
18
19 class PollyTTS(TTS):
20 def __init__(self, lang="en-us", config=None):
21 import boto3
22 config = config or Configuration.get().get("tts", {}).get("polly", {})
23 super(PollyTTS, self).__init__(lang, config, PollyTTSValidator(self),
24 audio_ext="mp3",
25 ssml_tags=["speak", "say-as", "voice",
26 "prosody", "break",
27 "emphasis", "sub", "lang",
28 "phoneme", "w", "whisper",
29 "amazon:auto-breaths",
30 "p", "s", "amazon:effect",
31 "mark"])
32
33 self.voice = self.config.get("voice", "Matthew")
34 self.key_id = self.config.get("access_key_id", '')
35 self.key = self.config.get("secret_access_key", '')
36 self.region = self.config.get("region", 'us-east-1')
37 self.polly = boto3.Session(aws_access_key_id=self.key_id,
38 aws_secret_access_key=self.key,
39 region_name=self.region).client('polly')
40
41 def get_tts(self, sentence, wav_file):
42 text_type = "text"
43 if self.remove_ssml(sentence) != sentence:
44 text_type = "ssml"
45 sentence = sentence \
46 .replace("\\whispered", "/amazon:effect") \
47 .replace("whispered", "amazon:effect name=\"whispered\"")
48 response = self.polly.synthesize_speech(
49 OutputFormat=self.audio_ext,
50 Text=sentence,
51 TextType=text_type,
52 VoiceId=self.voice)
53
54 with open(wav_file, 'wb') as f:
55 f.write(response['AudioStream'].read())
56 return (wav_file, None) # No phonemes
57
58 def describe_voices(self, language_code="en-US"):
59 if language_code.islower():
60 a, b = language_code.split("-")
61 b = b.upper()
62 language_code = "-".join([a, b])
63 # example 'it-IT' useful to retrieve voices
64 voices = self.polly.describe_voices(LanguageCode=language_code)
65
66 return voices
67
68
69 class PollyTTSValidator(TTSValidator):
70 def __init__(self, tts):
71 super(PollyTTSValidator, self).__init__(tts)
72
73 def validate_lang(self):
74 # TODO
75 pass
76
77 def validate_dependencies(self):
78 try:
79 from boto3 import Session
80 except ImportError:
81 raise Exception(
82 'PollyTTS dependencies not installed, please run pip install '
83 'boto3 ')
84
85 def validate_connection(self):
86 try:
87 if not self.tts.voice:
88 raise Exception("Polly TTS Voice not configured")
89 output = self.tts.describe_voices()
90 except TypeError:
91 raise Exception(
92 'PollyTTS server could not be verified. Please check your '
93 'internet connection and credentials.')
94
95 def get_tts_class(self):
96 return PollyTTS
97
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mycroft/tts/polly_tts.py b/mycroft/tts/polly_tts.py
--- a/mycroft/tts/polly_tts.py
+++ b/mycroft/tts/polly_tts.py
@@ -34,6 +34,7 @@
self.key_id = self.config.get("access_key_id", '')
self.key = self.config.get("secret_access_key", '')
self.region = self.config.get("region", 'us-east-1')
+ self.engine = self.config.get("engine", "standard")
self.polly = boto3.Session(aws_access_key_id=self.key_id,
aws_secret_access_key=self.key,
region_name=self.region).client('polly')
@@ -49,7 +50,8 @@
OutputFormat=self.audio_ext,
Text=sentence,
TextType=text_type,
- VoiceId=self.voice)
+ VoiceId=self.voice,
+ Engine=self.engine)
with open(wav_file, 'wb') as f:
f.write(response['AudioStream'].read())
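With the patch above, `PollyTTS` reads an extra `engine` key from the same `tts.polly` block it already uses for the voice and credentials. A hedged sketch of that block follows, expressed as the Python dict returned by `Configuration.get().get("tts", {}).get("polly", {})`; only keys actually read in `polly_tts.py` are shown, and the placeholder credential values are assumptions.

```python
# Hypothetical tts.polly configuration block after the patch.
polly_config = {
    "voice": "Matthew",
    "access_key_id": "<AWS_ACCESS_KEY_ID>",
    "secret_access_key": "<AWS_SECRET_ACCESS_KEY>",
    "region": "us-east-1",
    "engine": "neural",  # new key; falls back to "standard" when omitted
}
```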
|
{"golden_diff": "diff --git a/mycroft/tts/polly_tts.py b/mycroft/tts/polly_tts.py\n--- a/mycroft/tts/polly_tts.py\n+++ b/mycroft/tts/polly_tts.py\n@@ -34,6 +34,7 @@\n self.key_id = self.config.get(\"access_key_id\", '')\n self.key = self.config.get(\"secret_access_key\", '')\n self.region = self.config.get(\"region\", 'us-east-1')\n+ self.engine = self.config.get(\"engine\", \"standard\")\n self.polly = boto3.Session(aws_access_key_id=self.key_id,\n aws_secret_access_key=self.key,\n region_name=self.region).client('polly')\n@@ -49,7 +50,8 @@\n OutputFormat=self.audio_ext,\n Text=sentence,\n TextType=text_type,\n- VoiceId=self.voice)\n+ VoiceId=self.voice,\n+ Engine=self.engine)\n \n with open(wav_file, 'wb') as f:\n f.write(response['AudioStream'].read())\n", "issue": "Idea: Enhance Amazon Polly support\nAmazon Polly works well using standard voices, I have it running perfectly under the latest Picroft image. However, there is no current support for 'neural' engine voices, as well as 'conversational' style SSML. Both of these provide exceptionally high quality text-to-speech audio and would be nice to have the ability to use with Mycroft.\r\n\r\nThis [post](https://community.mycroft.ai/t/regarding-polly-tts-support/8722/10) on the community forums goes in to a little more detail on it.\r\n\r\nThanks!\n", "before_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom mycroft.tts.tts import TTS, TTSValidator\nfrom mycroft.configuration import Configuration\n\n\nclass PollyTTS(TTS):\n def __init__(self, lang=\"en-us\", config=None):\n import boto3\n config = config or Configuration.get().get(\"tts\", {}).get(\"polly\", {})\n super(PollyTTS, self).__init__(lang, config, PollyTTSValidator(self),\n audio_ext=\"mp3\",\n ssml_tags=[\"speak\", \"say-as\", \"voice\",\n \"prosody\", \"break\",\n \"emphasis\", \"sub\", \"lang\",\n \"phoneme\", \"w\", \"whisper\",\n \"amazon:auto-breaths\",\n \"p\", \"s\", \"amazon:effect\",\n \"mark\"])\n\n self.voice = self.config.get(\"voice\", \"Matthew\")\n self.key_id = self.config.get(\"access_key_id\", '')\n self.key = self.config.get(\"secret_access_key\", '')\n self.region = self.config.get(\"region\", 'us-east-1')\n self.polly = boto3.Session(aws_access_key_id=self.key_id,\n aws_secret_access_key=self.key,\n region_name=self.region).client('polly')\n\n def get_tts(self, sentence, wav_file):\n text_type = \"text\"\n if self.remove_ssml(sentence) != sentence:\n text_type = \"ssml\"\n sentence = sentence \\\n .replace(\"\\\\whispered\", \"/amazon:effect\") \\\n .replace(\"whispered\", \"amazon:effect name=\\\"whispered\\\"\")\n response = self.polly.synthesize_speech(\n OutputFormat=self.audio_ext,\n Text=sentence,\n TextType=text_type,\n VoiceId=self.voice)\n\n with open(wav_file, 'wb') as f:\n f.write(response['AudioStream'].read())\n return (wav_file, None) # No phonemes\n\n def describe_voices(self, language_code=\"en-US\"):\n if language_code.islower():\n a, b = language_code.split(\"-\")\n b = 
b.upper()\n language_code = \"-\".join([a, b])\n # example 'it-IT' useful to retrieve voices\n voices = self.polly.describe_voices(LanguageCode=language_code)\n\n return voices\n\n\nclass PollyTTSValidator(TTSValidator):\n def __init__(self, tts):\n super(PollyTTSValidator, self).__init__(tts)\n\n def validate_lang(self):\n # TODO\n pass\n\n def validate_dependencies(self):\n try:\n from boto3 import Session\n except ImportError:\n raise Exception(\n 'PollyTTS dependencies not installed, please run pip install '\n 'boto3 ')\n\n def validate_connection(self):\n try:\n if not self.tts.voice:\n raise Exception(\"Polly TTS Voice not configured\")\n output = self.tts.describe_voices()\n except TypeError:\n raise Exception(\n 'PollyTTS server could not be verified. Please check your '\n 'internet connection and credentials.')\n\n def get_tts_class(self):\n return PollyTTS\n", "path": "mycroft/tts/polly_tts.py"}], "after_files": [{"content": "# Copyright 2017 Mycroft AI Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nfrom mycroft.tts.tts import TTS, TTSValidator\nfrom mycroft.configuration import Configuration\n\n\nclass PollyTTS(TTS):\n def __init__(self, lang=\"en-us\", config=None):\n import boto3\n config = config or Configuration.get().get(\"tts\", {}).get(\"polly\", {})\n super(PollyTTS, self).__init__(lang, config, PollyTTSValidator(self),\n audio_ext=\"mp3\",\n ssml_tags=[\"speak\", \"say-as\", \"voice\",\n \"prosody\", \"break\",\n \"emphasis\", \"sub\", \"lang\",\n \"phoneme\", \"w\", \"whisper\",\n \"amazon:auto-breaths\",\n \"p\", \"s\", \"amazon:effect\",\n \"mark\"])\n\n self.voice = self.config.get(\"voice\", \"Matthew\")\n self.key_id = self.config.get(\"access_key_id\", '')\n self.key = self.config.get(\"secret_access_key\", '')\n self.region = self.config.get(\"region\", 'us-east-1')\n self.engine = self.config.get(\"engine\", \"standard\")\n self.polly = boto3.Session(aws_access_key_id=self.key_id,\n aws_secret_access_key=self.key,\n region_name=self.region).client('polly')\n\n def get_tts(self, sentence, wav_file):\n text_type = \"text\"\n if self.remove_ssml(sentence) != sentence:\n text_type = \"ssml\"\n sentence = sentence \\\n .replace(\"\\\\whispered\", \"/amazon:effect\") \\\n .replace(\"whispered\", \"amazon:effect name=\\\"whispered\\\"\")\n response = self.polly.synthesize_speech(\n OutputFormat=self.audio_ext,\n Text=sentence,\n TextType=text_type,\n VoiceId=self.voice,\n Engine=self.engine)\n\n with open(wav_file, 'wb') as f:\n f.write(response['AudioStream'].read())\n return (wav_file, None) # No phonemes\n\n def describe_voices(self, language_code=\"en-US\"):\n if language_code.islower():\n a, b = language_code.split(\"-\")\n b = b.upper()\n language_code = \"-\".join([a, b])\n # example 'it-IT' useful to retrieve voices\n voices = self.polly.describe_voices(LanguageCode=language_code)\n\n return voices\n\n\nclass PollyTTSValidator(TTSValidator):\n def __init__(self, tts):\n super(PollyTTSValidator, self).__init__(tts)\n\n def validate_lang(self):\n # 
TODO\n pass\n\n def validate_dependencies(self):\n try:\n from boto3 import Session\n except ImportError:\n raise Exception(\n 'PollyTTS dependencies not installed, please run pip install '\n 'boto3 ')\n\n def validate_connection(self):\n try:\n if not self.tts.voice:\n raise Exception(\"Polly TTS Voice not configured\")\n output = self.tts.describe_voices()\n except TypeError:\n raise Exception(\n 'PollyTTS server could not be verified. Please check your '\n 'internet connection and credentials.')\n\n def get_tts_class(self):\n return PollyTTS\n", "path": "mycroft/tts/polly_tts.py"}]}
| 1,386 | 227 |
gh_patches_debug_22555
|
rasdani/github-patches
|
git_diff
|
cookiecutter__cookiecutter-967
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
gh: prefix doesn't work anymore
* Cookiecutter version: 1.5.1
* Template project url: `gh:*`
* Python version: 2.7.13
* Operating System: Linux
### Description:
cookiecutter does not honor prefixes anymore.
### What I've run:
Simply testing the example from the README doesn't work as expected:
``` bash
$ cookiecutter gh:audreyr/cookiecutter-pypackage
A valid repository for "gh:audreyr/cookiecutter-pypackage" could not be found in the following locations:
gh:audreyr/cookiecutter-pypackage
/home/me/.cookiecutters/gh:audreyr/cookiecutter-pypackage
```
The same commands using the full repository path works as expected:
```bash
$ cookiecutter https://github.com/audreyr/cookiecutter-pypackage
```
--- END ISSUE ---
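A plausible reading of the failure (consistent with the patch later in this entry) is that `get_config` merged the user's `.cookiecutterrc` into the defaults with a flat `dict.update`, so a user file that defines its own `abbreviations` entry replaces the whole built-in mapping and the `gh`/`gl`/`bb` prefixes disappear. Below is a minimal sketch of that mechanism, with an invented `myorg` abbreviation standing in for whatever the user config contains.

```python
# Why a flat dict.update() can drop the built-in prefixes: a nested dict in
# the user config replaces the default nested dict wholesale.
DEFAULT_CONFIG = {
    "abbreviations": {
        "gh": "https://github.com/{0}.git",
        "gl": "https://gitlab.com/{0}.git",
        "bb": "https://bitbucket.org/{0}",
    },
}
user_config = {"abbreviations": {"myorg": "https://github.com/myorg/{0}.git"}}

merged = dict(DEFAULT_CONFIG)
merged.update(user_config)
print("gh" in merged["abbreviations"])  # False -> "gh:user/repo" no longer expands
```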
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `cookiecutter/config.py`
Content:
```
1 # -*- coding: utf-8 -*-
2
3 """Global configuration handling."""
4
5 from __future__ import unicode_literals
6 import copy
7 import logging
8 import os
9 import io
10
11 import poyo
12
13 from .exceptions import ConfigDoesNotExistException
14 from .exceptions import InvalidConfiguration
15
16
17 logger = logging.getLogger(__name__)
18
19 USER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')
20
21 BUILTIN_ABBREVIATIONS = {
22 'gh': 'https://github.com/{0}.git',
23 'gl': 'https://gitlab.com/{0}.git',
24 'bb': 'https://bitbucket.org/{0}',
25 }
26
27 DEFAULT_CONFIG = {
28 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),
29 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),
30 'default_context': {},
31 'abbreviations': BUILTIN_ABBREVIATIONS,
32 }
33
34
35 def _expand_path(path):
36 """Expand both environment variables and user home in the given path."""
37 path = os.path.expandvars(path)
38 path = os.path.expanduser(path)
39 return path
40
41
42 def get_config(config_path):
43 """Retrieve the config from the specified path, returning a config dict."""
44 if not os.path.exists(config_path):
45 raise ConfigDoesNotExistException
46
47 logger.debug('config_path is {0}'.format(config_path))
48 with io.open(config_path, encoding='utf-8') as file_handle:
49 try:
50 yaml_dict = poyo.parse_string(file_handle.read())
51 except poyo.exceptions.PoyoException as e:
52 raise InvalidConfiguration(
53 'Unable to parse YAML file {}. Error: {}'
54 ''.format(config_path, e)
55 )
56
57 config_dict = copy.copy(DEFAULT_CONFIG)
58 config_dict.update(yaml_dict)
59
60 raw_replay_dir = config_dict['replay_dir']
61 config_dict['replay_dir'] = _expand_path(raw_replay_dir)
62
63 raw_cookies_dir = config_dict['cookiecutters_dir']
64 config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir)
65
66 return config_dict
67
68
69 def get_user_config(config_file=None, default_config=False):
70 """Return the user config as a dict.
71
72 If ``default_config`` is True, ignore ``config_file`` and return default
73 values for the config parameters.
74
75 If a path to a ``config_file`` is given, that is different from the default
76 location, load the user config from that.
77
78 Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG``
79 environment variable. If set, load the config from this path. This will
80 raise an error if the specified path is not valid.
81
82 If the environment variable is not set, try the default config file path
83 before falling back to the default config values.
84 """
85 # Do NOT load a config. Return defaults instead.
86 if default_config:
87 return copy.copy(DEFAULT_CONFIG)
88
89 # Load the given config file
90 if config_file and config_file is not USER_CONFIG_PATH:
91 return get_config(config_file)
92
93 try:
94 # Does the user set up a config environment variable?
95 env_config_file = os.environ['COOKIECUTTER_CONFIG']
96 except KeyError:
97 # Load an optional user config if it exists
98 # otherwise return the defaults
99 if os.path.exists(USER_CONFIG_PATH):
100 return get_config(USER_CONFIG_PATH)
101 else:
102 return copy.copy(DEFAULT_CONFIG)
103 else:
104 # There is a config environment variable. Try to load it.
105 # Do not check for existence, so invalid file paths raise an error.
106 return get_config(env_config_file)
107
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/cookiecutter/config.py b/cookiecutter/config.py
--- a/cookiecutter/config.py
+++ b/cookiecutter/config.py
@@ -39,6 +39,25 @@
return path
+def merge_configs(default, overwrite):
+ """Recursively update a dict with the key/value pair of another.
+
+ Dict values that are dictionaries themselves will be updated, whilst
+ preserving existing keys.
+ """
+ new_config = copy.deepcopy(default)
+
+ for k, v in overwrite.items():
+ # Make sure to preserve existing items in
+ # nested dicts, for example `abbreviations`
+ if isinstance(v, dict):
+ new_config[k] = merge_configs(default[k], v)
+ else:
+ new_config[k] = v
+
+ return new_config
+
+
def get_config(config_path):
"""Retrieve the config from the specified path, returning a config dict."""
if not os.path.exists(config_path):
@@ -54,8 +73,7 @@
''.format(config_path, e)
)
- config_dict = copy.copy(DEFAULT_CONFIG)
- config_dict.update(yaml_dict)
+ config_dict = merge_configs(DEFAULT_CONFIG, yaml_dict)
raw_replay_dir = config_dict['replay_dir']
config_dict['replay_dir'] = _expand_path(raw_replay_dir)
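A short usage sketch of the `merge_configs` helper introduced above (the `myorg` abbreviation is invented for illustration): nested dicts such as `abbreviations` now keep the built-in keys while still accepting user additions.

```python
# Assuming merge_configs as defined in the patch above.
defaults = {"abbreviations": {"gh": "https://github.com/{0}.git"}}
user = {"abbreviations": {"myorg": "https://github.com/myorg/{0}.git"}}

merged = merge_configs(defaults, user)
print(sorted(merged["abbreviations"]))  # ['gh', 'myorg'] -- built-ins preserved
```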
|
{"golden_diff": "diff --git a/cookiecutter/config.py b/cookiecutter/config.py\n--- a/cookiecutter/config.py\n+++ b/cookiecutter/config.py\n@@ -39,6 +39,25 @@\n return path\n \n \n+def merge_configs(default, overwrite):\n+ \"\"\"Recursively update a dict with the key/value pair of another.\n+\n+ Dict values that are dictionaries themselves will be updated, whilst\n+ preserving existing keys.\n+ \"\"\"\n+ new_config = copy.deepcopy(default)\n+\n+ for k, v in overwrite.items():\n+ # Make sure to preserve existing items in\n+ # nested dicts, for example `abbreviations`\n+ if isinstance(v, dict):\n+ new_config[k] = merge_configs(default[k], v)\n+ else:\n+ new_config[k] = v\n+\n+ return new_config\n+\n+\n def get_config(config_path):\n \"\"\"Retrieve the config from the specified path, returning a config dict.\"\"\"\n if not os.path.exists(config_path):\n@@ -54,8 +73,7 @@\n ''.format(config_path, e)\n )\n \n- config_dict = copy.copy(DEFAULT_CONFIG)\n- config_dict.update(yaml_dict)\n+ config_dict = merge_configs(DEFAULT_CONFIG, yaml_dict)\n \n raw_replay_dir = config_dict['replay_dir']\n config_dict['replay_dir'] = _expand_path(raw_replay_dir)\n", "issue": "gh: prefix doesn't work anymore\n* Cookiecutter version: 1.5.1\r\n* Template project url: `gh:*`\r\n* Python version: 2.7.13\r\n* Operating System: Linux\r\n\r\n### Description:\r\n\r\ncookiecutter does not honor prefixes anymore.\r\n\r\n### What I've run:\r\n\r\nSimply testing the example from the README doesn't work as expected:\r\n\r\n``` bash\r\n$ cookiecutter gh:audreyr/cookiecutter-pypackage\r\nA valid repository for \"gh:audreyr/cookiecutter-pypackage\" could not be found in the following locations:\r\ngh:audreyr/cookiecutter-pypackage\r\n/home/me/.cookiecutters/gh:audreyr/cookiecutter-pypackage\r\n```\r\nThe same commands using the full repository path works as expected:\r\n\r\n```bash\r\n$ cookiecutter https://github.com/audreyr/cookiecutter-pypackage\r\n```\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Global configuration handling.\"\"\"\n\nfrom __future__ import unicode_literals\nimport copy\nimport logging\nimport os\nimport io\n\nimport poyo\n\nfrom .exceptions import ConfigDoesNotExistException\nfrom .exceptions import InvalidConfiguration\n\n\nlogger = logging.getLogger(__name__)\n\nUSER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')\n\nBUILTIN_ABBREVIATIONS = {\n 'gh': 'https://github.com/{0}.git',\n 'gl': 'https://gitlab.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nDEFAULT_CONFIG = {\n 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),\n 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),\n 'default_context': {},\n 'abbreviations': BUILTIN_ABBREVIATIONS,\n}\n\n\ndef _expand_path(path):\n \"\"\"Expand both environment variables and user home in the given path.\"\"\"\n path = os.path.expandvars(path)\n path = os.path.expanduser(path)\n return path\n\n\ndef get_config(config_path):\n \"\"\"Retrieve the config from the specified path, returning a config dict.\"\"\"\n if not os.path.exists(config_path):\n raise ConfigDoesNotExistException\n\n logger.debug('config_path is {0}'.format(config_path))\n with io.open(config_path, encoding='utf-8') as file_handle:\n try:\n yaml_dict = poyo.parse_string(file_handle.read())\n except poyo.exceptions.PoyoException as e:\n raise InvalidConfiguration(\n 'Unable to parse YAML file {}. 
Error: {}'\n ''.format(config_path, e)\n )\n\n config_dict = copy.copy(DEFAULT_CONFIG)\n config_dict.update(yaml_dict)\n\n raw_replay_dir = config_dict['replay_dir']\n config_dict['replay_dir'] = _expand_path(raw_replay_dir)\n\n raw_cookies_dir = config_dict['cookiecutters_dir']\n config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir)\n\n return config_dict\n\n\ndef get_user_config(config_file=None, default_config=False):\n \"\"\"Return the user config as a dict.\n\n If ``default_config`` is True, ignore ``config_file`` and return default\n values for the config parameters.\n\n If a path to a ``config_file`` is given, that is different from the default\n location, load the user config from that.\n\n Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG``\n environment variable. If set, load the config from this path. This will\n raise an error if the specified path is not valid.\n\n If the environment variable is not set, try the default config file path\n before falling back to the default config values.\n \"\"\"\n # Do NOT load a config. Return defaults instead.\n if default_config:\n return copy.copy(DEFAULT_CONFIG)\n\n # Load the given config file\n if config_file and config_file is not USER_CONFIG_PATH:\n return get_config(config_file)\n\n try:\n # Does the user set up a config environment variable?\n env_config_file = os.environ['COOKIECUTTER_CONFIG']\n except KeyError:\n # Load an optional user config if it exists\n # otherwise return the defaults\n if os.path.exists(USER_CONFIG_PATH):\n return get_config(USER_CONFIG_PATH)\n else:\n return copy.copy(DEFAULT_CONFIG)\n else:\n # There is a config environment variable. Try to load it.\n # Do not check for existence, so invalid file paths raise an error.\n return get_config(env_config_file)\n", "path": "cookiecutter/config.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\n\n\"\"\"Global configuration handling.\"\"\"\n\nfrom __future__ import unicode_literals\nimport copy\nimport logging\nimport os\nimport io\n\nimport poyo\n\nfrom .exceptions import ConfigDoesNotExistException\nfrom .exceptions import InvalidConfiguration\n\n\nlogger = logging.getLogger(__name__)\n\nUSER_CONFIG_PATH = os.path.expanduser('~/.cookiecutterrc')\n\nBUILTIN_ABBREVIATIONS = {\n 'gh': 'https://github.com/{0}.git',\n 'gl': 'https://gitlab.com/{0}.git',\n 'bb': 'https://bitbucket.org/{0}',\n}\n\nDEFAULT_CONFIG = {\n 'cookiecutters_dir': os.path.expanduser('~/.cookiecutters/'),\n 'replay_dir': os.path.expanduser('~/.cookiecutter_replay/'),\n 'default_context': {},\n 'abbreviations': BUILTIN_ABBREVIATIONS,\n}\n\n\ndef _expand_path(path):\n \"\"\"Expand both environment variables and user home in the given path.\"\"\"\n path = os.path.expandvars(path)\n path = os.path.expanduser(path)\n return path\n\n\ndef merge_configs(default, overwrite):\n \"\"\"Recursively update a dict with the key/value pair of another.\n\n Dict values that are dictionaries themselves will be updated, whilst\n preserving existing keys.\n \"\"\"\n new_config = copy.deepcopy(default)\n\n for k, v in overwrite.items():\n # Make sure to preserve existing items in\n # nested dicts, for example `abbreviations`\n if isinstance(v, dict):\n new_config[k] = merge_configs(default[k], v)\n else:\n new_config[k] = v\n\n return new_config\n\n\ndef get_config(config_path):\n \"\"\"Retrieve the config from the specified path, returning a config dict.\"\"\"\n if not os.path.exists(config_path):\n raise ConfigDoesNotExistException\n\n logger.debug('config_path is 
{0}'.format(config_path))\n with io.open(config_path, encoding='utf-8') as file_handle:\n try:\n yaml_dict = poyo.parse_string(file_handle.read())\n except poyo.exceptions.PoyoException as e:\n raise InvalidConfiguration(\n 'Unable to parse YAML file {}. Error: {}'\n ''.format(config_path, e)\n )\n\n config_dict = merge_configs(DEFAULT_CONFIG, yaml_dict)\n\n raw_replay_dir = config_dict['replay_dir']\n config_dict['replay_dir'] = _expand_path(raw_replay_dir)\n\n raw_cookies_dir = config_dict['cookiecutters_dir']\n config_dict['cookiecutters_dir'] = _expand_path(raw_cookies_dir)\n\n return config_dict\n\n\ndef get_user_config(config_file=None, default_config=False):\n \"\"\"Return the user config as a dict.\n\n If ``default_config`` is True, ignore ``config_file`` and return default\n values for the config parameters.\n\n If a path to a ``config_file`` is given, that is different from the default\n location, load the user config from that.\n\n Otherwise look up the config file path in the ``COOKIECUTTER_CONFIG``\n environment variable. If set, load the config from this path. This will\n raise an error if the specified path is not valid.\n\n If the environment variable is not set, try the default config file path\n before falling back to the default config values.\n \"\"\"\n # Do NOT load a config. Return defaults instead.\n if default_config:\n return copy.copy(DEFAULT_CONFIG)\n\n # Load the given config file\n if config_file and config_file is not USER_CONFIG_PATH:\n return get_config(config_file)\n\n try:\n # Does the user set up a config environment variable?\n env_config_file = os.environ['COOKIECUTTER_CONFIG']\n except KeyError:\n # Load an optional user config if it exists\n # otherwise return the defaults\n if os.path.exists(USER_CONFIG_PATH):\n return get_config(USER_CONFIG_PATH)\n else:\n return copy.copy(DEFAULT_CONFIG)\n else:\n # There is a config environment variable. Try to load it.\n # Do not check for existence, so invalid file paths raise an error.\n return get_config(env_config_file)\n", "path": "cookiecutter/config.py"}]}
| 1,452 | 305 |
gh_patches_debug_4279
|
rasdani/github-patches
|
git_diff
|
facebookresearch__ParlAI-3345
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Reddit Movie Dialog no longer exists
**Bug description**
[Reddit Movie Dialog](https://parl.ai/docs/tasks.html#movie-dialog-reddit) no longer exists.
**Reproduction steps**
```
TrainModel.main(
# similar to before
task='empathetic_dialogues,blended_skill_talk,movie_dialog_reddit,convai2,persona_chat',
model='transformer/generator',
model_file='from_pretrained/model',
# initialize with a pretrained model
init_model='zoo:tutorial_transformer_generator/model',
# arguments we get from the pretrained model.
# Unfortunately, these must be looked up separately for each model.
n_heads=16, n_layers=8, n_positions=512, text_truncate=512,
label_truncate=128, ffn_size=2048, embedding_size=512,
activation='gelu', variant='xlm',
dict_lower=True, dict_tokenizer='bpe',
dict_file='zoo:tutorial_transformer_generator/model.dict',
learn_positional_embeddings=True,
# some training arguments, specific to this fine-tuning
# use a small learning rate with ADAM optimizer
lr=1e-5, optimizer='adam',
warmup_updates=100,
# early stopping on perplexity
validation_metric='ppl',
# train at most 10 minutes, and validate every 0.25 epochs
max_train_time=600, validation_every_n_epochs=0.25,
# depend on your gpu. If you have a V100, this is good
batchsize=12, fp16=True, fp16_impl='mem_efficient',
# speeds up validation
skip_generation=True,
# helps us cram more examples into our gpu at a time
dynamic_batching='full',
)
```
**Logs**
Please paste the command line output:
```
---------------------------------------------------------------------------
ModuleNotFoundError Traceback (most recent call last)
<ipython-input-39-ff3044de39fe> in <module>()
36
37 # helps us cram more examples into our gpu at a time
---> 38 dynamic_batching='full',
39 )
15 frames
/usr/lib/python3.6/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)
ModuleNotFoundError: No module named 'parlai.tasks.movie_dialog_reddit'
---------------------------------------------------------------------------
NOTE: If your import is failing due to a missing package, you can
manually install dependencies using either !pip or !apt.
To view examples of installing some common dependencies, click the
"Open Examples" button below.
---------------------------------------------------------------------------```
--- END ISSUE ---
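Until the task listing is corrected, a practical workaround (an assumption here, not part of the documentation fix below) is to drop the removed task from the comma-separated `task` string and keep the rest of the reporter's `TrainModel.main(...)` call unchanged.

```python
# Hypothetical workaround: omit the removed movie_dialog_reddit task from the
# multitask string; the other arguments in the reproduction stay the same.
task = 'empathetic_dialogues,blended_skill_talk,convai2,persona_chat'
```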
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/generate_task_list.py`
Content:
```
1 #!/usr/bin/env python3
2 # Copyright (c) Facebook, Inc. and its affiliates.
3 # This source code is licensed under the MIT license found in the
4 # LICENSE file in the root directory of this source tree.
5
6 from parlai.tasks.task_list import task_list
7
8 MASTER = "https://github.com/facebookresearch/ParlAI/tree/master"
9
10 category_order = ['QA', 'Cloze', 'Goal', 'ChitChat', 'Negotiation', 'Visual', 'decanlp']
11 category_task_list = {x: [] for x in category_order}
12
13 fout = open('task_list.inc', 'w')
14
15 s = "They consist of: "
16 for t in category_order:
17 fout.write(f"1. {t} tasks\n")
18 fout.write("\n")
19
20 for task_dict in task_list:
21 tags = task_dict.get('tags', None)
22 for tag in tags:
23 if tag in category_task_list:
24 category_task_list[tag].append(task_dict)
25
26 for num_category, (category, tl) in enumerate(category_task_list.items()):
27 if num_category != 0:
28 fout.write("\n-----\n\n")
29
30 fout.write(f'## {category} Tasks\n')
31
32 for task_dict in tl:
33 id = task_dict.get('id', None)
34 display_name = task_dict.get('display_name', None)
35 task = task_dict.get('task', None)
36 tags = task_dict.get('tags', None)
37 description = task_dict.get('description', None)
38 notes = task_dict.get('notes', None)
39 code_urlend = task[: max(task.find(':'), len(task))]
40 code_url = f"{MASTER}/parlai/tasks/{code_urlend}"
41 links = task_dict.get("links", {})
42 assert isinstance(links, dict), f"task {id} is poorly formatted"
43 urls = [(k, v) for k, v in links.items()]
44 urls.append(("code", code_url))
45
46 urls_md = ", ".join(f"[{k}]({v})" for k, v in urls)
47 fout.write(f"### {display_name}\n")
48 fout.write(f"_Links_: {urls_md}\n\n")
49 if description:
50 fout.write(description + "\n")
51 if notes:
52 fout.write(":::{admonition,note} Notes\n")
53 fout.write(notes + "\n")
54 fout.write(":::\n")
55 fout.write("\n\n")
56
57 fout.close()
58
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/source/generate_task_list.py b/docs/source/generate_task_list.py
--- a/docs/source/generate_task_list.py
+++ b/docs/source/generate_task_list.py
@@ -45,6 +45,7 @@
urls_md = ", ".join(f"[{k}]({v})" for k, v in urls)
fout.write(f"### {display_name}\n")
+ fout.write(f"_Usage_: `--task {task}`\n\n")
fout.write(f"_Links_: {urls_md}\n\n")
if description:
fout.write(description + "\n")
|
{"golden_diff": "diff --git a/docs/source/generate_task_list.py b/docs/source/generate_task_list.py\n--- a/docs/source/generate_task_list.py\n+++ b/docs/source/generate_task_list.py\n@@ -45,6 +45,7 @@\n \n urls_md = \", \".join(f\"[{k}]({v})\" for k, v in urls)\n fout.write(f\"### {display_name}\\n\")\n+ fout.write(f\"_Usage_: `--task {task}`\\n\\n\")\n fout.write(f\"_Links_: {urls_md}\\n\\n\")\n if description:\n fout.write(description + \"\\n\")\n", "issue": "Reddit Movie Dialog no longer exists\n**Bug description**\r\n[Reddit Movie Dialog](https://parl.ai/docs/tasks.html#movie-dialog-reddit) no longer exists.\r\n\r\n**Reproduction steps**\r\n```\r\nTrainModel.main(\r\n # similar to before\r\n task='empathetic_dialogues,blended_skill_talk,movie_dialog_reddit,convai2,persona_chat', \r\n model='transformer/generator',\r\n model_file='from_pretrained/model',\r\n \r\n # initialize with a pretrained model\r\n init_model='zoo:tutorial_transformer_generator/model',\r\n \r\n # arguments we get from the pretrained model.\r\n # Unfortunately, these must be looked up separately for each model.\r\n n_heads=16, n_layers=8, n_positions=512, text_truncate=512,\r\n label_truncate=128, ffn_size=2048, embedding_size=512,\r\n activation='gelu', variant='xlm',\r\n dict_lower=True, dict_tokenizer='bpe',\r\n dict_file='zoo:tutorial_transformer_generator/model.dict',\r\n learn_positional_embeddings=True,\r\n \r\n # some training arguments, specific to this fine-tuning\r\n # use a small learning rate with ADAM optimizer\r\n lr=1e-5, optimizer='adam',\r\n warmup_updates=100,\r\n # early stopping on perplexity\r\n validation_metric='ppl',\r\n # train at most 10 minutes, and validate every 0.25 epochs\r\n max_train_time=600, validation_every_n_epochs=0.25,\r\n \r\n # depend on your gpu. If you have a V100, this is good\r\n batchsize=12, fp16=True, fp16_impl='mem_efficient',\r\n \r\n # speeds up validation\r\n skip_generation=True,\r\n \r\n # helps us cram more examples into our gpu at a time\r\n dynamic_batching='full',\r\n)\r\n```\r\n\r\n**Logs**\r\nPlease paste the command line output:\r\n\r\n```\r\n---------------------------------------------------------------------------\r\nModuleNotFoundError Traceback (most recent call last)\r\n<ipython-input-39-ff3044de39fe> in <module>()\r\n 36 \r\n 37 # helps us cram more examples into our gpu at a time\r\n---> 38 dynamic_batching='full',\r\n 39 )\r\n\r\n15 frames\r\n/usr/lib/python3.6/importlib/_bootstrap.py in _find_and_load_unlocked(name, import_)\r\n\r\nModuleNotFoundError: No module named 'parlai.tasks.movie_dialog_reddit'\r\n\r\n---------------------------------------------------------------------------\r\nNOTE: If your import is failing due to a missing package, you can\r\nmanually install dependencies using either !pip or !apt.\r\n\r\nTo view examples of installing some common dependencies, click the\r\n\"Open Examples\" button below.\r\n---------------------------------------------------------------------------```\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. 
and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom parlai.tasks.task_list import task_list\n\nMASTER = \"https://github.com/facebookresearch/ParlAI/tree/master\"\n\ncategory_order = ['QA', 'Cloze', 'Goal', 'ChitChat', 'Negotiation', 'Visual', 'decanlp']\ncategory_task_list = {x: [] for x in category_order}\n\nfout = open('task_list.inc', 'w')\n\ns = \"They consist of: \"\nfor t in category_order:\n fout.write(f\"1. {t} tasks\\n\")\nfout.write(\"\\n\")\n\nfor task_dict in task_list:\n tags = task_dict.get('tags', None)\n for tag in tags:\n if tag in category_task_list:\n category_task_list[tag].append(task_dict)\n\nfor num_category, (category, tl) in enumerate(category_task_list.items()):\n if num_category != 0:\n fout.write(\"\\n-----\\n\\n\")\n\n fout.write(f'## {category} Tasks\\n')\n\n for task_dict in tl:\n id = task_dict.get('id', None)\n display_name = task_dict.get('display_name', None)\n task = task_dict.get('task', None)\n tags = task_dict.get('tags', None)\n description = task_dict.get('description', None)\n notes = task_dict.get('notes', None)\n code_urlend = task[: max(task.find(':'), len(task))]\n code_url = f\"{MASTER}/parlai/tasks/{code_urlend}\"\n links = task_dict.get(\"links\", {})\n assert isinstance(links, dict), f\"task {id} is poorly formatted\"\n urls = [(k, v) for k, v in links.items()]\n urls.append((\"code\", code_url))\n\n urls_md = \", \".join(f\"[{k}]({v})\" for k, v in urls)\n fout.write(f\"### {display_name}\\n\")\n fout.write(f\"_Links_: {urls_md}\\n\\n\")\n if description:\n fout.write(description + \"\\n\")\n if notes:\n fout.write(\":::{admonition,note} Notes\\n\")\n fout.write(notes + \"\\n\")\n fout.write(\":::\\n\")\n fout.write(\"\\n\\n\")\n\nfout.close()\n", "path": "docs/source/generate_task_list.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates.\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nfrom parlai.tasks.task_list import task_list\n\nMASTER = \"https://github.com/facebookresearch/ParlAI/tree/master\"\n\ncategory_order = ['QA', 'Cloze', 'Goal', 'ChitChat', 'Negotiation', 'Visual', 'decanlp']\ncategory_task_list = {x: [] for x in category_order}\n\nfout = open('task_list.inc', 'w')\n\ns = \"They consist of: \"\nfor t in category_order:\n fout.write(f\"1. 
{t} tasks\\n\")\nfout.write(\"\\n\")\n\nfor task_dict in task_list:\n tags = task_dict.get('tags', None)\n for tag in tags:\n if tag in category_task_list:\n category_task_list[tag].append(task_dict)\n\nfor num_category, (category, tl) in enumerate(category_task_list.items()):\n if num_category != 0:\n fout.write(\"\\n-----\\n\\n\")\n\n fout.write(f'## {category} Tasks\\n')\n\n for task_dict in tl:\n id = task_dict.get('id', None)\n display_name = task_dict.get('display_name', None)\n task = task_dict.get('task', None)\n tags = task_dict.get('tags', None)\n description = task_dict.get('description', None)\n notes = task_dict.get('notes', None)\n code_urlend = task[: max(task.find(':'), len(task))]\n code_url = f\"{MASTER}/parlai/tasks/{code_urlend}\"\n links = task_dict.get(\"links\", {})\n assert isinstance(links, dict), f\"task {id} is poorly formatted\"\n urls = [(k, v) for k, v in links.items()]\n urls.append((\"code\", code_url))\n\n urls_md = \", \".join(f\"[{k}]({v})\" for k, v in urls)\n fout.write(f\"### {display_name}\\n\")\n fout.write(f\"_Usage_: `--task {task}`\\n\\n\")\n fout.write(f\"_Links_: {urls_md}\\n\\n\")\n if description:\n fout.write(description + \"\\n\")\n if notes:\n fout.write(\":::{admonition,note} Notes\\n\")\n fout.write(notes + \"\\n\")\n fout.write(\":::\\n\")\n fout.write(\"\\n\\n\")\n\nfout.close()\n", "path": "docs/source/generate_task_list.py"}]}
| 1,501 | 134 |
gh_patches_debug_20139
|
rasdani/github-patches
|
git_diff
|
nilearn__nilearn-2963
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AxisError when computing `reporting.get_clusters_table`
This issue concerns what seems to me like a bug in the `reporting.get_clusters_table` method. I have been encountering an `AxisError` raised by numpy when trying to compute the table with certain `stat_threshold` values.
With a high threshold value (e.g. `3.`) the computation works fine, but with smaller threshold values (e.g. `1e-2`) the error is thrown.
The error happens because the `ijk` value passed to numpy's `apply_along_axis` [here](https://github.com/nilearn/nilearn/blob/ac1a934e3b2b4061f894b518c960412df9ea4f11/nilearn/reporting/_get_clusters_table.py#L83) is an empty list. I don't know why yet. You can reproduce the error with the snippet below.
I guess it makes less sense to extract peaks with a lower threshold, but it is still worth investigating why the error occurs.
### Expected behavior
`get_clusters_table` returns the clusters table for any `stat_threshold` value
### Actual behavior
`get_clusters_table` raises an error when `stat_threshold` values are small.
### Steps and code to reproduce bug
```python
import tempfile
from nilearn import reporting
from nilearn.datasets import fetch_neurovault_ids
DATA_PATH = tempfile.TemporaryDirectory()
collection_id = 307
neurovault_element = fetch_neurovault_ids(
collection_ids=[collection_id],
mode="download_new",
data_dir=str(DATA_PATH),
)
sample_brain_map = neurovault_element.images[0]
# Working fine with a stat threshold of 3.
neurovault_peaks = reporting.get_clusters_table(
sample_brain_map,
stat_threshold=3.,
min_distance=12.,
)
# Raising error with a stat threshold of 1e-2
neurovault_peaks = reporting.get_clusters_table(
sample_brain_map,
stat_threshold=1e-2,
min_distance=12.,
)
```
--- END ISSUE ---
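A minimal sketch of the failure mode the reporter describes, independent of nilearn: when no local maxima survive the filtering step, the peak list is empty, so `ijk` collapses to an empty 1-D array and asking `apply_along_axis` to iterate along `axis=1` is out of bounds.

```python
# Minimal reproduction of the AxisError mechanism outside nilearn:
# an empty peak list has ndim == 1, so axis=1 does not exist.
import numpy as np

ijk = np.round(np.array([])).astype(int)  # what an empty list of peaks becomes
print(ijk.shape, ijk.ndim)                # (0,) 1
np.apply_along_axis(lambda row: row.sum(), axis=1, arr=ijk)
# numpy.AxisError: axis 1 is out of bounds for array of dimension 1
```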
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `nilearn/reporting/_get_clusters_table.py`
Content:
```
1 """
2 This module implements plotting functions useful to report analysis results.
3
4 Author: Martin Perez-Guevara, Elvis Dohmatob, 2017
5 """
6
7 import warnings
8 from string import ascii_lowercase
9
10 import numpy as np
11 import pandas as pd
12 import nibabel as nib
13 from scipy import ndimage
14
15 from nilearn.image import get_data
16 from nilearn.image.resampling import coord_transform
17 from nilearn._utils import check_niimg_3d
18
19
20 def _local_max(data, affine, min_distance):
21 """Find all local maxima of the array, separated by at least min_distance.
22 Adapted from https://stackoverflow.com/a/22631583/2589328
23
24 Parameters
25 ----------
26 data : array_like
27 3D array of with masked values for cluster.
28
29 affine : np.ndarray
30 Square matrix specifying the position of the image array data
31 in a reference space.
32
33 min_distance : int
34 Minimum distance between local maxima in ``data``, in terms of mm.
35
36 Returns
37 -------
38 ijk : `numpy.ndarray`
39 (n_foci, 3) array of local maxima indices for cluster.
40
41 vals : `numpy.ndarray`
42 (n_foci,) array of values from data at ijk.
43
44 """
45 ijk, vals = _identify_subpeaks(data)
46 xyz, ijk, vals = _sort_subpeaks(ijk, vals, affine)
47 ijk, vals = _pare_subpeaks(xyz, ijk, vals, min_distance)
48 return ijk, vals
49
50
51 def _identify_subpeaks(data):
52 """Identify cluster peak and subpeaks based on minimum distance.
53
54 Parameters
55 ----------
56 data : `numpy.ndarray`
57 3D array of with masked values for cluster.
58
59 Returns
60 -------
61 ijk : `numpy.ndarray`
62 (n_foci, 3) array of local maxima indices for cluster.
63 vals : `numpy.ndarray`
64 (n_foci,) array of values from data at ijk.
65
66 Notes
67 -----
68 When a cluster's local maxima correspond to contiguous voxels with the
69 same values (as in a binary cluster), this function determines the center
70 of mass for those voxels.
71 """
72 # Initial identification of subpeaks with minimal minimum distance
73 data_max = ndimage.filters.maximum_filter(data, 3)
74 maxima = data == data_max
75 data_min = ndimage.filters.minimum_filter(data, 3)
76 diff = (data_max - data_min) > 0
77 maxima[diff == 0] = 0
78
79 labeled, n_subpeaks = ndimage.label(maxima)
80 labels_index = range(1, n_subpeaks + 1)
81 ijk = np.array(ndimage.center_of_mass(data, labeled, labels_index))
82 ijk = np.round(ijk).astype(int)
83 vals = np.apply_along_axis(
84 arr=ijk, axis=1, func1d=_get_val, input_arr=data
85 )
86 # Determine if all subpeaks are within the cluster
87 # They may not be if the cluster is binary and has a shape where the COM is
88 # outside the cluster, like a donut.
89 cluster_idx = np.vstack(np.where(labeled)).T.tolist()
90 subpeaks_outside_cluster = [
91 i
92 for i, peak_idx in enumerate(ijk.tolist())
93 if peak_idx not in cluster_idx
94 ]
95 vals[subpeaks_outside_cluster] = np.nan
96 if subpeaks_outside_cluster:
97 warnings.warn(
98 "Attention: At least one of the (sub)peaks falls outside of the "
99 "cluster body."
100 )
101 return ijk, vals
102
103
104 def _sort_subpeaks(ijk, vals, affine):
105 # Sort subpeaks in cluster in descending order of stat value
106 order = (-vals).argsort()
107 vals = vals[order]
108 ijk = ijk[order, :]
109 xyz = nib.affines.apply_affine(affine, ijk) # Convert to xyz in mm
110 return xyz, ijk, vals
111
112
113 def _pare_subpeaks(xyz, ijk, vals, min_distance):
114 # Reduce list of subpeaks based on distance
115 keep_idx = np.ones(xyz.shape[0]).astype(bool)
116 for i in range(xyz.shape[0]):
117 for j in range(i + 1, xyz.shape[0]):
118 if keep_idx[i] == 1:
119 dist = np.linalg.norm(xyz[i, :] - xyz[j, :])
120 keep_idx[j] = dist > min_distance
121 ijk = ijk[keep_idx, :]
122 vals = vals[keep_idx]
123 return ijk, vals
124
125
126 def _get_val(row, input_arr):
127 """Small function for extracting values from array based on index.
128 """
129 i, j, k = row
130 return input_arr[i, j, k]
131
132
133 def get_clusters_table(stat_img, stat_threshold, cluster_threshold=None,
134 two_sided=False, min_distance=8.):
135 """Creates pandas dataframe with img cluster statistics.
136
137 Parameters
138 ----------
139 stat_img : Niimg-like object,
140 Statistical image (presumably in z- or p-scale).
141
142 stat_threshold : `float`
143 Cluster forming threshold in same scale as `stat_img` (either a
144 p-value or z-scale value).
145
146 cluster_threshold : `int` or `None`, optional
147 Cluster size threshold, in voxels.
148
149 two_sided : `bool`, optional
150 Whether to employ two-sided thresholding or to evaluate positive values
151 only. Default=False.
152
153 min_distance : `float`, optional
154 Minimum distance between subpeaks in mm. Default=8mm.
155
156 Returns
157 -------
158 df : `pandas.DataFrame`
159 Table with peaks and subpeaks from thresholded `stat_img`. For binary
160 clusters (clusters with >1 voxel containing only one value), the table
161 reports the center of mass of the cluster,
162 rather than any peaks/subpeaks.
163
164 """
165 cols = ['Cluster ID', 'X', 'Y', 'Z', 'Peak Stat', 'Cluster Size (mm3)']
166
167 # check that stat_img is niimg-like object and 3D
168 stat_img = check_niimg_3d(stat_img)
169 # If cluster threshold is used, there is chance that stat_map will be
170 # modified, therefore copy is needed
171 if cluster_threshold is None:
172 stat_map = get_data(stat_img)
173 else:
174 stat_map = get_data(stat_img).copy()
175
176 # Define array for 6-connectivity, aka NN1 or "faces"
177 conn_mat = np.zeros((3, 3, 3), int)
178 conn_mat[1, 1, :] = 1
179 conn_mat[1, :, 1] = 1
180 conn_mat[:, 1, 1] = 1
181 voxel_size = np.prod(stat_img.header.get_zooms())
182
183 signs = [1, -1] if two_sided else [1]
184 no_clusters_found = True
185 rows = []
186 for sign in signs:
187 # Flip map if necessary
188 temp_stat_map = stat_map * sign
189
190 # Binarize using CDT
191 binarized = temp_stat_map > stat_threshold
192 binarized = binarized.astype(int)
193
194 # If the stat threshold is too high simply return an empty dataframe
195 if np.sum(binarized) == 0:
196 warnings.warn(
197 'Attention: No clusters with stat {0} than {1}'.format(
198 'higher' if sign == 1 else 'lower',
199 stat_threshold * sign,
200 )
201 )
202 continue
203
204 # Extract connected components above cluster size threshold
205 label_map = ndimage.measurements.label(binarized, conn_mat)[0]
206 clust_ids = sorted(list(np.unique(label_map)[1:]))
207 for c_val in clust_ids:
208 if cluster_threshold is not None and np.sum(
209 label_map == c_val) < cluster_threshold:
210 temp_stat_map[label_map == c_val] = 0
211 binarized[label_map == c_val] = 0
212
213 # If the cluster threshold is too high simply return an empty dataframe
214 # this checks for stats higher than threshold after small clusters
215 # were removed from temp_stat_map
216 if np.sum(temp_stat_map > stat_threshold) == 0:
217 warnings.warn(
218 'Attention: No clusters with more than {0} voxels'.format(
219 cluster_threshold,
220 )
221 )
222 continue
223
224 # Now re-label and create table
225 label_map = ndimage.measurements.label(binarized, conn_mat)[0]
226 clust_ids = sorted(list(np.unique(label_map)[1:]))
227 peak_vals = np.array(
228 [np.max(temp_stat_map * (label_map == c)) for c in clust_ids])
229 # Sort by descending max value
230 clust_ids = [clust_ids[c] for c in (-peak_vals).argsort()]
231
232 for c_id, c_val in enumerate(clust_ids):
233 cluster_mask = label_map == c_val
234 masked_data = temp_stat_map * cluster_mask
235
236 cluster_size_mm = int(np.sum(cluster_mask) * voxel_size)
237
238 # Get peaks, subpeaks and associated statistics
239 subpeak_ijk, subpeak_vals = _local_max(
240 masked_data,
241 stat_img.affine,
242 min_distance=min_distance,
243 )
244 subpeak_vals *= sign # flip signs if necessary
245 subpeak_xyz = np.asarray(
246 coord_transform(
247 subpeak_ijk[:, 0],
248 subpeak_ijk[:, 1],
249 subpeak_ijk[:, 2],
250 stat_img.affine,
251 )
252 ).tolist()
253 subpeak_xyz = np.array(subpeak_xyz).T
254
255 # Only report peak and, at most, top 3 subpeaks.
256 n_subpeaks = np.min((len(subpeak_vals), 4))
257 for subpeak in range(n_subpeaks):
258 if subpeak == 0:
259 row = [
260 c_id + 1,
261 subpeak_xyz[subpeak, 0],
262 subpeak_xyz[subpeak, 1],
263 subpeak_xyz[subpeak, 2],
264 subpeak_vals[subpeak],
265 cluster_size_mm,
266 ]
267 else:
268 # Subpeak naming convention is cluster num+letter:
269 # 1a, 1b, etc
270 sp_id = '{0}{1}'.format(
271 c_id + 1,
272 ascii_lowercase[subpeak - 1],
273 )
274 row = [
275 sp_id,
276 subpeak_xyz[subpeak, 0],
277 subpeak_xyz[subpeak, 1],
278 subpeak_xyz[subpeak, 2],
279 subpeak_vals[subpeak],
280 '',
281 ]
282 rows += [row]
283
284 # If we reach this point, there are clusters in this sign
285 no_clusters_found = False
286
287 if no_clusters_found:
288 df = pd.DataFrame(columns=cols)
289 else:
290 df = pd.DataFrame(columns=cols, data=rows)
291
292 return df
293
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/nilearn/reporting/_get_clusters_table.py b/nilearn/reporting/_get_clusters_table.py
--- a/nilearn/reporting/_get_clusters_table.py
+++ b/nilearn/reporting/_get_clusters_table.py
@@ -12,7 +12,7 @@
import nibabel as nib
from scipy import ndimage
-from nilearn.image import get_data
+from nilearn._utils.niimg import _safe_get_data
from nilearn.image.resampling import coord_transform
from nilearn._utils import check_niimg_3d
@@ -168,10 +168,8 @@
stat_img = check_niimg_3d(stat_img)
# If cluster threshold is used, there is chance that stat_map will be
# modified, therefore copy is needed
- if cluster_threshold is None:
- stat_map = get_data(stat_img)
- else:
- stat_map = get_data(stat_img).copy()
+ stat_map = _safe_get_data(stat_img, ensure_finite=True,
+ copy_data=(cluster_threshold is not None))
# Define array for 6-connectivity, aka NN1 or "faces"
conn_mat = np.zeros((3, 3, 3), int)
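Editor's aside (not part of the dataset row): the patch above swaps `get_data` for nilearn's private `_safe_get_data(..., ensure_finite=True, ...)`, so non-finite voxels are cleaned up before thresholding instead of propagating into the peak search that raised the AxisError. The sketch below only illustrates that sanitising step; it assumes numpy, nibabel, and a nilearn version that still exposes `_safe_get_data` at the import path used in the diff, and the toy image is invented for illustration.

```python
# Minimal sketch of the behaviour the patch relies on (assumed available:
# numpy, nibabel, and nilearn's private nilearn._utils.niimg._safe_get_data
# exactly as imported in the diff above).
import numpy as np
import nibabel as nib

from nilearn._utils.niimg import _safe_get_data

data = np.zeros((4, 4, 4), dtype=np.float32)
data[1, 1, 1] = 5.0      # one "activated" voxel
data[2, 2, 2] = np.nan   # the kind of value that tripped get_clusters_table

img = nib.Nifti1Image(data, affine=np.eye(4))

# Same call the patched get_clusters_table makes: ensure_finite replaces
# non-finite voxels, and copy_data=True leaves the original image untouched.
clean = _safe_get_data(img, ensure_finite=True, copy_data=True)

assert np.isfinite(clean).all()
assert clean[1, 1, 1] == 5.0
```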
|
{"golden_diff": "diff --git a/nilearn/reporting/_get_clusters_table.py b/nilearn/reporting/_get_clusters_table.py\n--- a/nilearn/reporting/_get_clusters_table.py\n+++ b/nilearn/reporting/_get_clusters_table.py\n@@ -12,7 +12,7 @@\n import nibabel as nib\n from scipy import ndimage\n \n-from nilearn.image import get_data\n+from nilearn._utils.niimg import _safe_get_data\n from nilearn.image.resampling import coord_transform\n from nilearn._utils import check_niimg_3d\n \n@@ -168,10 +168,8 @@\n stat_img = check_niimg_3d(stat_img)\n # If cluster threshold is used, there is chance that stat_map will be\n # modified, therefore copy is needed\n- if cluster_threshold is None:\n- stat_map = get_data(stat_img)\n- else:\n- stat_map = get_data(stat_img).copy()\n+ stat_map = _safe_get_data(stat_img, ensure_finite=True,\n+ copy_data=(cluster_threshold is not None))\n \n # Define array for 6-connectivity, aka NN1 or \"faces\"\n conn_mat = np.zeros((3, 3, 3), int)\n", "issue": "AxisError when computing `reporting.get_cluster_table` \nThis issue concerns what seems to me like a bug in the `reporting.get_cluster_table` method. I have been encoutering a `AxisError` raised by numpy when trying to compute the table with certain `stat_threshold` values.\r\n\r\nWith a high threshold value (e.g `3.`) the computation works fine, but when using smaller threshold values (1e-2), the error is thrown.\r\n\r\nThe error happens because the `ijk` value passed to a numpy `apply_along_axis` [here](https://github.com/nilearn/nilearn/blob/ac1a934e3b2b4061f894b518c960412df9ea4f11/nilearn/reporting/_get_clusters_table.py#L83) is an empty list. I don't know why yet. You can reproduce the error with the snippet below.\r\n\r\nI guess it makes less sense to extract peaks with a lower threshold, but it still worth investigating why the error occurs.\r\n\r\n### Expected behavior\r\n\r\n`get_clusters_table` return the clusters table whatever the `stat_threshold` value\r\n\r\n### Actual behavior\r\n\r\n`get_clusters_table` raises an error when `stat_threshold` values are small.\r\n\r\n### Steps and code to reproduce bug\r\n\r\n```python\r\nimport tempfile\r\n\r\nfrom nilearn import reporting\r\nfrom nilearn.datasets import fetch_neurovault_ids\r\n\r\nDATA_PATH = tempfile.TemporaryDirectory()\r\n\r\ncollection_id = 307\r\nneurovault_element = fetch_neurovault_ids(\r\n collection_ids=[collection_id],\r\n mode=\"download_new\",\r\n data_dir=str(DATA_PATH),\r\n)\r\nsample_brain_map = neurovault_element.images[0]\r\n\r\n# Working fine with a stat threshold of 3.\r\nneurovault_peaks = reporting.get_clusters_table(\r\n sample_brain_map,\r\n stat_threshold=3.,\r\n min_distance=12.,\r\n)\r\n\r\n# Raising error with a stat threshold of 1e-2\r\nneurovault_peaks = reporting.get_clusters_table(\r\n sample_brain_map,\r\n stat_threshold=1e-2,\r\n min_distance=12.,\r\n)\r\n```\r\n\n", "before_files": [{"content": "\"\"\"\nThis module implements plotting functions useful to report analysis results.\n\nAuthor: Martin Perez-Guevara, Elvis Dohmatob, 2017\n\"\"\"\n\nimport warnings\nfrom string import ascii_lowercase\n\nimport numpy as np\nimport pandas as pd\nimport nibabel as nib\nfrom scipy import ndimage\n\nfrom nilearn.image import get_data\nfrom nilearn.image.resampling import coord_transform\nfrom nilearn._utils import check_niimg_3d\n\n\ndef _local_max(data, affine, min_distance):\n \"\"\"Find all local maxima of the array, separated by at least min_distance.\n Adapted from https://stackoverflow.com/a/22631583/2589328\n\n Parameters\n ----------\n 
data : array_like\n 3D array of with masked values for cluster.\n\n affine : np.ndarray\n Square matrix specifying the position of the image array data\n in a reference space.\n\n min_distance : int\n Minimum distance between local maxima in ``data``, in terms of mm.\n\n Returns\n -------\n ijk : `numpy.ndarray`\n (n_foci, 3) array of local maxima indices for cluster.\n\n vals : `numpy.ndarray`\n (n_foci,) array of values from data at ijk.\n\n \"\"\"\n ijk, vals = _identify_subpeaks(data)\n xyz, ijk, vals = _sort_subpeaks(ijk, vals, affine)\n ijk, vals = _pare_subpeaks(xyz, ijk, vals, min_distance)\n return ijk, vals\n\n\ndef _identify_subpeaks(data):\n \"\"\"Identify cluster peak and subpeaks based on minimum distance.\n\n Parameters\n ----------\n data : `numpy.ndarray`\n 3D array of with masked values for cluster.\n\n Returns\n -------\n ijk : `numpy.ndarray`\n (n_foci, 3) array of local maxima indices for cluster.\n vals : `numpy.ndarray`\n (n_foci,) array of values from data at ijk.\n\n Notes\n -----\n When a cluster's local maxima correspond to contiguous voxels with the\n same values (as in a binary cluster), this function determines the center\n of mass for those voxels.\n \"\"\"\n # Initial identification of subpeaks with minimal minimum distance\n data_max = ndimage.filters.maximum_filter(data, 3)\n maxima = data == data_max\n data_min = ndimage.filters.minimum_filter(data, 3)\n diff = (data_max - data_min) > 0\n maxima[diff == 0] = 0\n\n labeled, n_subpeaks = ndimage.label(maxima)\n labels_index = range(1, n_subpeaks + 1)\n ijk = np.array(ndimage.center_of_mass(data, labeled, labels_index))\n ijk = np.round(ijk).astype(int)\n vals = np.apply_along_axis(\n arr=ijk, axis=1, func1d=_get_val, input_arr=data\n )\n # Determine if all subpeaks are within the cluster\n # They may not be if the cluster is binary and has a shape where the COM is\n # outside the cluster, like a donut.\n cluster_idx = np.vstack(np.where(labeled)).T.tolist()\n subpeaks_outside_cluster = [\n i\n for i, peak_idx in enumerate(ijk.tolist())\n if peak_idx not in cluster_idx\n ]\n vals[subpeaks_outside_cluster] = np.nan\n if subpeaks_outside_cluster:\n warnings.warn(\n \"Attention: At least one of the (sub)peaks falls outside of the \"\n \"cluster body.\"\n )\n return ijk, vals\n\n\ndef _sort_subpeaks(ijk, vals, affine):\n # Sort subpeaks in cluster in descending order of stat value\n order = (-vals).argsort()\n vals = vals[order]\n ijk = ijk[order, :]\n xyz = nib.affines.apply_affine(affine, ijk) # Convert to xyz in mm\n return xyz, ijk, vals\n\n\ndef _pare_subpeaks(xyz, ijk, vals, min_distance):\n # Reduce list of subpeaks based on distance\n keep_idx = np.ones(xyz.shape[0]).astype(bool)\n for i in range(xyz.shape[0]):\n for j in range(i + 1, xyz.shape[0]):\n if keep_idx[i] == 1:\n dist = np.linalg.norm(xyz[i, :] - xyz[j, :])\n keep_idx[j] = dist > min_distance\n ijk = ijk[keep_idx, :]\n vals = vals[keep_idx]\n return ijk, vals\n\n\ndef _get_val(row, input_arr):\n \"\"\"Small function for extracting values from array based on index.\n \"\"\"\n i, j, k = row\n return input_arr[i, j, k]\n\n\ndef get_clusters_table(stat_img, stat_threshold, cluster_threshold=None,\n two_sided=False, min_distance=8.):\n \"\"\"Creates pandas dataframe with img cluster statistics.\n\n Parameters\n ----------\n stat_img : Niimg-like object,\n Statistical image (presumably in z- or p-scale).\n\n stat_threshold : `float`\n Cluster forming threshold in same scale as `stat_img` (either a\n p-value or z-scale value).\n\n cluster_threshold : 
`int` or `None`, optional\n Cluster size threshold, in voxels.\n\n two_sided : `bool`, optional\n Whether to employ two-sided thresholding or to evaluate positive values\n only. Default=False.\n\n min_distance : `float`, optional\n Minimum distance between subpeaks in mm. Default=8mm.\n\n Returns\n -------\n df : `pandas.DataFrame`\n Table with peaks and subpeaks from thresholded `stat_img`. For binary\n clusters (clusters with >1 voxel containing only one value), the table\n reports the center of mass of the cluster,\n rather than any peaks/subpeaks.\n\n \"\"\"\n cols = ['Cluster ID', 'X', 'Y', 'Z', 'Peak Stat', 'Cluster Size (mm3)']\n\n # check that stat_img is niimg-like object and 3D\n stat_img = check_niimg_3d(stat_img)\n # If cluster threshold is used, there is chance that stat_map will be\n # modified, therefore copy is needed\n if cluster_threshold is None:\n stat_map = get_data(stat_img)\n else:\n stat_map = get_data(stat_img).copy()\n\n # Define array for 6-connectivity, aka NN1 or \"faces\"\n conn_mat = np.zeros((3, 3, 3), int)\n conn_mat[1, 1, :] = 1\n conn_mat[1, :, 1] = 1\n conn_mat[:, 1, 1] = 1\n voxel_size = np.prod(stat_img.header.get_zooms())\n\n signs = [1, -1] if two_sided else [1]\n no_clusters_found = True\n rows = []\n for sign in signs:\n # Flip map if necessary\n temp_stat_map = stat_map * sign\n\n # Binarize using CDT\n binarized = temp_stat_map > stat_threshold\n binarized = binarized.astype(int)\n\n # If the stat threshold is too high simply return an empty dataframe\n if np.sum(binarized) == 0:\n warnings.warn(\n 'Attention: No clusters with stat {0} than {1}'.format(\n 'higher' if sign == 1 else 'lower',\n stat_threshold * sign,\n )\n )\n continue\n\n # Extract connected components above cluster size threshold\n label_map = ndimage.measurements.label(binarized, conn_mat)[0]\n clust_ids = sorted(list(np.unique(label_map)[1:]))\n for c_val in clust_ids:\n if cluster_threshold is not None and np.sum(\n label_map == c_val) < cluster_threshold:\n temp_stat_map[label_map == c_val] = 0\n binarized[label_map == c_val] = 0\n\n # If the cluster threshold is too high simply return an empty dataframe\n # this checks for stats higher than threshold after small clusters\n # were removed from temp_stat_map\n if np.sum(temp_stat_map > stat_threshold) == 0:\n warnings.warn(\n 'Attention: No clusters with more than {0} voxels'.format(\n cluster_threshold,\n )\n )\n continue\n\n # Now re-label and create table\n label_map = ndimage.measurements.label(binarized, conn_mat)[0]\n clust_ids = sorted(list(np.unique(label_map)[1:]))\n peak_vals = np.array(\n [np.max(temp_stat_map * (label_map == c)) for c in clust_ids])\n # Sort by descending max value\n clust_ids = [clust_ids[c] for c in (-peak_vals).argsort()]\n\n for c_id, c_val in enumerate(clust_ids):\n cluster_mask = label_map == c_val\n masked_data = temp_stat_map * cluster_mask\n\n cluster_size_mm = int(np.sum(cluster_mask) * voxel_size)\n\n # Get peaks, subpeaks and associated statistics\n subpeak_ijk, subpeak_vals = _local_max(\n masked_data,\n stat_img.affine,\n min_distance=min_distance,\n )\n subpeak_vals *= sign # flip signs if necessary\n subpeak_xyz = np.asarray(\n coord_transform(\n subpeak_ijk[:, 0],\n subpeak_ijk[:, 1],\n subpeak_ijk[:, 2],\n stat_img.affine,\n )\n ).tolist()\n subpeak_xyz = np.array(subpeak_xyz).T\n\n # Only report peak and, at most, top 3 subpeaks.\n n_subpeaks = np.min((len(subpeak_vals), 4))\n for subpeak in range(n_subpeaks):\n if subpeak == 0:\n row = [\n c_id + 1,\n subpeak_xyz[subpeak, 0],\n 
subpeak_xyz[subpeak, 1],\n subpeak_xyz[subpeak, 2],\n subpeak_vals[subpeak],\n cluster_size_mm,\n ]\n else:\n # Subpeak naming convention is cluster num+letter:\n # 1a, 1b, etc\n sp_id = '{0}{1}'.format(\n c_id + 1,\n ascii_lowercase[subpeak - 1],\n )\n row = [\n sp_id,\n subpeak_xyz[subpeak, 0],\n subpeak_xyz[subpeak, 1],\n subpeak_xyz[subpeak, 2],\n subpeak_vals[subpeak],\n '',\n ]\n rows += [row]\n\n # If we reach this point, there are clusters in this sign\n no_clusters_found = False\n\n if no_clusters_found:\n df = pd.DataFrame(columns=cols)\n else:\n df = pd.DataFrame(columns=cols, data=rows)\n\n return df\n", "path": "nilearn/reporting/_get_clusters_table.py"}], "after_files": [{"content": "\"\"\"\nThis module implements plotting functions useful to report analysis results.\n\nAuthor: Martin Perez-Guevara, Elvis Dohmatob, 2017\n\"\"\"\n\nimport warnings\nfrom string import ascii_lowercase\n\nimport numpy as np\nimport pandas as pd\nimport nibabel as nib\nfrom scipy import ndimage\n\nfrom nilearn._utils.niimg import _safe_get_data\nfrom nilearn.image.resampling import coord_transform\nfrom nilearn._utils import check_niimg_3d\n\n\ndef _local_max(data, affine, min_distance):\n \"\"\"Find all local maxima of the array, separated by at least min_distance.\n Adapted from https://stackoverflow.com/a/22631583/2589328\n\n Parameters\n ----------\n data : array_like\n 3D array of with masked values for cluster.\n\n affine : np.ndarray\n Square matrix specifying the position of the image array data\n in a reference space.\n\n min_distance : int\n Minimum distance between local maxima in ``data``, in terms of mm.\n\n Returns\n -------\n ijk : `numpy.ndarray`\n (n_foci, 3) array of local maxima indices for cluster.\n\n vals : `numpy.ndarray`\n (n_foci,) array of values from data at ijk.\n\n \"\"\"\n ijk, vals = _identify_subpeaks(data)\n xyz, ijk, vals = _sort_subpeaks(ijk, vals, affine)\n ijk, vals = _pare_subpeaks(xyz, ijk, vals, min_distance)\n return ijk, vals\n\n\ndef _identify_subpeaks(data):\n \"\"\"Identify cluster peak and subpeaks based on minimum distance.\n\n Parameters\n ----------\n data : `numpy.ndarray`\n 3D array of with masked values for cluster.\n\n Returns\n -------\n ijk : `numpy.ndarray`\n (n_foci, 3) array of local maxima indices for cluster.\n vals : `numpy.ndarray`\n (n_foci,) array of values from data at ijk.\n\n Notes\n -----\n When a cluster's local maxima correspond to contiguous voxels with the\n same values (as in a binary cluster), this function determines the center\n of mass for those voxels.\n \"\"\"\n # Initial identification of subpeaks with minimal minimum distance\n data_max = ndimage.filters.maximum_filter(data, 3)\n maxima = data == data_max\n data_min = ndimage.filters.minimum_filter(data, 3)\n diff = (data_max - data_min) > 0\n maxima[diff == 0] = 0\n\n labeled, n_subpeaks = ndimage.label(maxima)\n labels_index = range(1, n_subpeaks + 1)\n ijk = np.array(ndimage.center_of_mass(data, labeled, labels_index))\n ijk = np.round(ijk).astype(int)\n vals = np.apply_along_axis(\n arr=ijk, axis=1, func1d=_get_val, input_arr=data\n )\n # Determine if all subpeaks are within the cluster\n # They may not be if the cluster is binary and has a shape where the COM is\n # outside the cluster, like a donut.\n cluster_idx = np.vstack(np.where(labeled)).T.tolist()\n subpeaks_outside_cluster = [\n i\n for i, peak_idx in enumerate(ijk.tolist())\n if peak_idx not in cluster_idx\n ]\n vals[subpeaks_outside_cluster] = np.nan\n if subpeaks_outside_cluster:\n warnings.warn(\n 
\"Attention: At least one of the (sub)peaks falls outside of the \"\n \"cluster body.\"\n )\n return ijk, vals\n\n\ndef _sort_subpeaks(ijk, vals, affine):\n # Sort subpeaks in cluster in descending order of stat value\n order = (-vals).argsort()\n vals = vals[order]\n ijk = ijk[order, :]\n xyz = nib.affines.apply_affine(affine, ijk) # Convert to xyz in mm\n return xyz, ijk, vals\n\n\ndef _pare_subpeaks(xyz, ijk, vals, min_distance):\n # Reduce list of subpeaks based on distance\n keep_idx = np.ones(xyz.shape[0]).astype(bool)\n for i in range(xyz.shape[0]):\n for j in range(i + 1, xyz.shape[0]):\n if keep_idx[i] == 1:\n dist = np.linalg.norm(xyz[i, :] - xyz[j, :])\n keep_idx[j] = dist > min_distance\n ijk = ijk[keep_idx, :]\n vals = vals[keep_idx]\n return ijk, vals\n\n\ndef _get_val(row, input_arr):\n \"\"\"Small function for extracting values from array based on index.\n \"\"\"\n i, j, k = row\n return input_arr[i, j, k]\n\n\ndef get_clusters_table(stat_img, stat_threshold, cluster_threshold=None,\n two_sided=False, min_distance=8.):\n \"\"\"Creates pandas dataframe with img cluster statistics.\n\n Parameters\n ----------\n stat_img : Niimg-like object,\n Statistical image (presumably in z- or p-scale).\n\n stat_threshold : `float`\n Cluster forming threshold in same scale as `stat_img` (either a\n p-value or z-scale value).\n\n cluster_threshold : `int` or `None`, optional\n Cluster size threshold, in voxels.\n\n two_sided : `bool`, optional\n Whether to employ two-sided thresholding or to evaluate positive values\n only. Default=False.\n\n min_distance : `float`, optional\n Minimum distance between subpeaks in mm. Default=8mm.\n\n Returns\n -------\n df : `pandas.DataFrame`\n Table with peaks and subpeaks from thresholded `stat_img`. For binary\n clusters (clusters with >1 voxel containing only one value), the table\n reports the center of mass of the cluster,\n rather than any peaks/subpeaks.\n\n \"\"\"\n cols = ['Cluster ID', 'X', 'Y', 'Z', 'Peak Stat', 'Cluster Size (mm3)']\n\n # check that stat_img is niimg-like object and 3D\n stat_img = check_niimg_3d(stat_img)\n # If cluster threshold is used, there is chance that stat_map will be\n # modified, therefore copy is needed\n stat_map = _safe_get_data(stat_img, ensure_finite=True,\n copy_data=(cluster_threshold is not None))\n\n # Define array for 6-connectivity, aka NN1 or \"faces\"\n conn_mat = np.zeros((3, 3, 3), int)\n conn_mat[1, 1, :] = 1\n conn_mat[1, :, 1] = 1\n conn_mat[:, 1, 1] = 1\n voxel_size = np.prod(stat_img.header.get_zooms())\n\n signs = [1, -1] if two_sided else [1]\n no_clusters_found = True\n rows = []\n for sign in signs:\n # Flip map if necessary\n temp_stat_map = stat_map * sign\n\n # Binarize using CDT\n binarized = temp_stat_map > stat_threshold\n binarized = binarized.astype(int)\n\n # If the stat threshold is too high simply return an empty dataframe\n if np.sum(binarized) == 0:\n warnings.warn(\n 'Attention: No clusters with stat {0} than {1}'.format(\n 'higher' if sign == 1 else 'lower',\n stat_threshold * sign,\n )\n )\n continue\n\n # Extract connected components above cluster size threshold\n label_map = ndimage.measurements.label(binarized, conn_mat)[0]\n clust_ids = sorted(list(np.unique(label_map)[1:]))\n for c_val in clust_ids:\n if cluster_threshold is not None and np.sum(\n label_map == c_val) < cluster_threshold:\n temp_stat_map[label_map == c_val] = 0\n binarized[label_map == c_val] = 0\n\n # If the cluster threshold is too high simply return an empty dataframe\n # this checks for stats higher 
than threshold after small clusters\n # were removed from temp_stat_map\n if np.sum(temp_stat_map > stat_threshold) == 0:\n warnings.warn(\n 'Attention: No clusters with more than {0} voxels'.format(\n cluster_threshold,\n )\n )\n continue\n\n # Now re-label and create table\n label_map = ndimage.measurements.label(binarized, conn_mat)[0]\n clust_ids = sorted(list(np.unique(label_map)[1:]))\n peak_vals = np.array(\n [np.max(temp_stat_map * (label_map == c)) for c in clust_ids])\n # Sort by descending max value\n clust_ids = [clust_ids[c] for c in (-peak_vals).argsort()]\n\n for c_id, c_val in enumerate(clust_ids):\n cluster_mask = label_map == c_val\n masked_data = temp_stat_map * cluster_mask\n\n cluster_size_mm = int(np.sum(cluster_mask) * voxel_size)\n\n # Get peaks, subpeaks and associated statistics\n subpeak_ijk, subpeak_vals = _local_max(\n masked_data,\n stat_img.affine,\n min_distance=min_distance,\n )\n subpeak_vals *= sign # flip signs if necessary\n subpeak_xyz = np.asarray(\n coord_transform(\n subpeak_ijk[:, 0],\n subpeak_ijk[:, 1],\n subpeak_ijk[:, 2],\n stat_img.affine,\n )\n ).tolist()\n subpeak_xyz = np.array(subpeak_xyz).T\n\n # Only report peak and, at most, top 3 subpeaks.\n n_subpeaks = np.min((len(subpeak_vals), 4))\n for subpeak in range(n_subpeaks):\n if subpeak == 0:\n row = [\n c_id + 1,\n subpeak_xyz[subpeak, 0],\n subpeak_xyz[subpeak, 1],\n subpeak_xyz[subpeak, 2],\n subpeak_vals[subpeak],\n cluster_size_mm,\n ]\n else:\n # Subpeak naming convention is cluster num+letter:\n # 1a, 1b, etc\n sp_id = '{0}{1}'.format(\n c_id + 1,\n ascii_lowercase[subpeak - 1],\n )\n row = [\n sp_id,\n subpeak_xyz[subpeak, 0],\n subpeak_xyz[subpeak, 1],\n subpeak_xyz[subpeak, 2],\n subpeak_vals[subpeak],\n '',\n ]\n rows += [row]\n\n # If we reach this point, there are clusters in this sign\n no_clusters_found = False\n\n if no_clusters_found:\n df = pd.DataFrame(columns=cols)\n else:\n df = pd.DataFrame(columns=cols, data=rows)\n\n return df\n", "path": "nilearn/reporting/_get_clusters_table.py"}]}
| 3,945 | 279 |
gh_patches_debug_8735 | rasdani/github-patches | git_diff | pyro-ppl__pyro-3251 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Docs builds are failing
Docs builds seem to have been failing since 1.8.5 release. We should fix this before the 1.8.6 release.
https://readthedocs.org/projects/pyro-ppl/builds/20847164/
<img width="812" alt="image" src="https://github.com/pyro-ppl/pyro/assets/648532/45149fae-a72d-481a-aaf9-73262d50aa92">
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/source/conf.py`
Content:
```
1 # Copyright (c) 2017-2019 Uber Technologies, Inc.
2 # SPDX-License-Identifier: Apache-2.0
3
4 import os
5 import sys
6
7 import sphinx_rtd_theme
8
9 # import pkg_resources
10
11 # -*- coding: utf-8 -*-
12 #
13 # Pyro documentation build configuration file, created by
14 # sphinx-quickstart on Thu Jun 15 17:16:14 2017.
15 #
16 # This file is execfile()d with the current directory set to its
17 # containing dir.
18 #
19 # Note that not all possible configuration values are present in this
20 # autogenerated file.
21 #
22 # All configuration values have a default; values that are commented out
23 # serve to show the default.
24
25 # If extensions (or modules to document with autodoc) are in another directory,
26 # add these directories to sys.path here. If the directory is relative to the
27 # documentation root, use os.path.abspath to make it absolute, like shown here.
28 #
29 sys.path.insert(0, os.path.abspath("../.."))
30
31 # -- General configuration ------------------------------------------------
32
33 # If your documentation needs a minimal Sphinx version, state it here.
34 #
35 # needs_sphinx = '1.0'
36
37 # Add any Sphinx extension module names here, as strings. They can be
38 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
39 # ones.
40 extensions = [
41 "sphinx.ext.intersphinx", #
42 "sphinx.ext.todo", #
43 "sphinx.ext.mathjax", #
44 "sphinx.ext.ifconfig", #
45 "sphinx.ext.viewcode", #
46 "sphinx.ext.githubpages", #
47 "sphinx.ext.graphviz", #
48 "sphinx.ext.autodoc",
49 "sphinx.ext.doctest",
50 'sphinx.ext.napoleon',
51 ]
52
53 # Disable documentation inheritance so as to avoid inheriting
54 # docstrings in a different format, e.g. when the parent class
55 # is a PyTorch class.
56
57 autodoc_inherit_docstrings = False
58
59 # Add any paths that contain templates here, relative to this directory.
60 templates_path = ["_templates"]
61
62 # The suffix(es) of source filenames.
63 # You can specify multiple suffix as a list of string:
64 #
65 # source_suffix = ['.rst', '.md']
66 source_suffix = ".rst"
67
68 # The master toctree document.
69 master_doc = "index"
70
71 # General information about the project.
72 project = u"Pyro"
73 copyright = u"2017-2018, Uber Technologies, Inc"
74 author = u"Uber AI Labs"
75
76 # The version info for the project you're documenting, acts as replacement for
77 # |version| and |release|, also used in various other places throughout the
78 # built documents.
79
80 version = ""
81
82 if "READTHEDOCS" not in os.environ:
83 # if developing locally, use pyro.__version__ as version
84 from pyro import __version__ # noqaE402
85
86 version = __version__
87
88 # release version
89 release = version
90
91 # The language for content autogenerated by Sphinx. Refer to documentation
92 # for a list of supported languages.
93 #
94 # This is also used if you do content translation via gettext catalogs.
95 # Usually you set "language" from the command line for these cases.
96 language = "en"
97
98 # List of patterns, relative to source directory, that match files and
99 # directories to ignore when looking for source files.
100 # This patterns also effect to html_static_path and html_extra_path
101 exclude_patterns = []
102
103 # The name of the Pygments (syntax highlighting) style to use.
104 pygments_style = "sphinx"
105
106 # If true, `todo` and `todoList` produce output, else they produce nothing.
107 todo_include_todos = True
108
109 # do not prepend module name to functions
110 add_module_names = False
111
112 # -- Options for HTML output ----------------------------------------------
113
114 # logo
115 html_logo = "_static/img/pyro_logo_wide.png"
116
117 # logo
118 html_favicon = "_static/img/favicon/favicon.ico"
119
120 # The theme to use for HTML and HTML Help pages. See the documentation for
121 # a list of builtin themes.
122 #
123 html_theme = "sphinx_rtd_theme"
124 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
125
126 # Theme options are theme-specific and customize the look and feel of a theme
127 # further. For a list of options available for each theme, see the
128 # documentation.
129
130 html_theme_options = {
131 "navigation_depth": 3,
132 "logo_only": True,
133 }
134
135 # Add any paths that contain custom static files (such as style sheets) here,
136 # relative to this directory. They are copied after the builtin static files,
137 # so a file named "default.css" will overwrite the builtin "default.css".
138 html_static_path = ["_static"]
139 html_style = "css/pyro.css"
140
141 # -- Options for HTMLHelp output ------------------------------------------
142
143 # Output file base name for HTML help builder.
144 htmlhelp_basename = "Pyrodoc"
145
146 # -- Options for LaTeX output ---------------------------------------------
147
148 latex_elements = {
149 # The paper size ('letterpaper' or 'a4paper').
150 #
151 # 'papersize': 'letterpaper',
152 # The font size ('10pt', '11pt' or '12pt').
153 #
154 # 'pointsize': '10pt',
155 # Additional stuff for the LaTeX preamble.
156 #
157 # 'preamble': '',
158 # Latex figure (float) alignment
159 #
160 # 'figure_align': 'htbp',
161 }
162
163 # Grouping the document tree into LaTeX files. List of tuples
164 # (source start file, target name, title,
165 # author, documentclass [howto, manual, or own class]).
166 latex_documents = [
167 # Disabled pdf builds to unblock readthedocs failed builds;
168 # see https://github.com/pyro-ppl/pyro/issues/3248
169 # (master_doc, "Pyro.tex", u"Pyro Documentation", u"Uber AI Labs", "manual"),
170 ]
171
172 # -- Options for manual page output ---------------------------------------
173
174 # One entry per manual page. List of tuples
175 # (source start file, name, description, authors, manual section).
176 man_pages = [(master_doc, "pyro", u"Pyro Documentation", [author], 1)]
177
178 # -- Options for Texinfo output -------------------------------------------
179
180 # Grouping the document tree into Texinfo files. List of tuples
181 # (source start file, target name, title, author,
182 # dir menu entry, description, category)
183 texinfo_documents = [
184 (
185 master_doc,
186 "Pyro",
187 u"Pyro Documentation",
188 author,
189 "Pyro",
190 "Deep Universal Probabilistic Programming.",
191 "Miscellaneous",
192 ),
193 ]
194
195 # Example configuration for intersphinx: refer to the Python standard library.
196 intersphinx_mapping = {
197 "python": ("https://docs.python.org/3/", None),
198 "torch": ("https://pytorch.org/docs/master/", None),
199 "funsor": ("http://funsor.pyro.ai/en/stable/", None),
200 "opt_einsum": ("https://optimized-einsum.readthedocs.io/en/stable/", None),
201 "scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
202 "Bio": ("https://biopython.org/docs/latest/api/", None),
203 "horovod": ("https://horovod.readthedocs.io/en/stable/", None),
204 "graphviz": ("https://graphviz.readthedocs.io/en/stable/", None),
205 }
206
207 # document class constructors (__init__ methods):
208 """ comment out this functionality for now;
209 def skip(app, what, name, obj, skip, options):
210 if name == "__init__":
211 return False
212 return skip
213 """
214
215
216 def setup(app):
217 app.add_css_file("css/pyro.css")
218
219
220 # app.connect("autodoc-skip-member", skip)
221
222
223 # @jpchen's hack to get rtd builder to install latest pytorch
224 # See similar line in the install section of .travis.yml
225 if "READTHEDOCS" in os.environ:
226 os.system("pip install numpy")
227 os.system(
228 "pip install torch==1.11.0+cpu torchvision==0.12.0+cpu "
229 "-f https://download.pytorch.org/whl/torch_stable.html"
230 )
231
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/source/conf.py b/docs/source/conf.py
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -164,9 +164,7 @@
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
- # Disabled pdf builds to unblock readthedocs failed builds;
- # see https://github.com/pyro-ppl/pyro/issues/3248
- # (master_doc, "Pyro.tex", u"Pyro Documentation", u"Uber AI Labs", "manual"),
+ (master_doc, "Pyro.tex", u"Pyro Documentation", u"Uber AI Labs", "manual"),
]
# -- Options for manual page output ---------------------------------------
|
{"golden_diff": "diff --git a/docs/source/conf.py b/docs/source/conf.py\n--- a/docs/source/conf.py\n+++ b/docs/source/conf.py\n@@ -164,9 +164,7 @@\n # (source start file, target name, title,\n # author, documentclass [howto, manual, or own class]).\n latex_documents = [\n- # Disabled pdf builds to unblock readthedocs failed builds;\n- # see https://github.com/pyro-ppl/pyro/issues/3248\n- # (master_doc, \"Pyro.tex\", u\"Pyro Documentation\", u\"Uber AI Labs\", \"manual\"),\n+ (master_doc, \"Pyro.tex\", u\"Pyro Documentation\", u\"Uber AI Labs\", \"manual\"),\n ]\n \n # -- Options for manual page output ---------------------------------------\n", "issue": "Docs builds are failing\nDocs builds seem to have been failing since 1.8.5 release. We should fix this before the 1.8.6 release.\r\n\r\nhttps://readthedocs.org/projects/pyro-ppl/builds/20847164/\r\n<img width=\"812\" alt=\"image\" src=\"https://github.com/pyro-ppl/pyro/assets/648532/45149fae-a72d-481a-aaf9-73262d50aa92\">\r\n\n", "before_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport sys\n\nimport sphinx_rtd_theme\n\n# import pkg_resources\n\n# -*- coding: utf-8 -*-\n#\n# Pyro documentation build configuration file, created by\n# sphinx-quickstart on Thu Jun 15 17:16:14 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nsys.path.insert(0, os.path.abspath(\"../..\"))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.intersphinx\", #\n \"sphinx.ext.todo\", #\n \"sphinx.ext.mathjax\", #\n \"sphinx.ext.ifconfig\", #\n \"sphinx.ext.viewcode\", #\n \"sphinx.ext.githubpages\", #\n \"sphinx.ext.graphviz\", #\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n 'sphinx.ext.napoleon',\n]\n\n# Disable documentation inheritance so as to avoid inheriting\n# docstrings in a different format, e.g. 
when the parent class\n# is a PyTorch class.\n\nautodoc_inherit_docstrings = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = u\"Pyro\"\ncopyright = u\"2017-2018, Uber Technologies, Inc\"\nauthor = u\"Uber AI Labs\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n\nversion = \"\"\n\nif \"READTHEDOCS\" not in os.environ:\n # if developing locally, use pyro.__version__ as version\n from pyro import __version__ # noqaE402\n\n version = __version__\n\n# release version\nrelease = version\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# do not prepend module name to functions\nadd_module_names = False\n\n# -- Options for HTML output ----------------------------------------------\n\n# logo\nhtml_logo = \"_static/img/pyro_logo_wide.png\"\n\n# logo\nhtml_favicon = \"_static/img/favicon/favicon.ico\"\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n\nhtml_theme_options = {\n \"navigation_depth\": 3,\n \"logo_only\": True,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\nhtml_style = \"css/pyro.css\"\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyrodoc\"\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. 
List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n # Disabled pdf builds to unblock readthedocs failed builds;\n # see https://github.com/pyro-ppl/pyro/issues/3248\n # (master_doc, \"Pyro.tex\", u\"Pyro Documentation\", u\"Uber AI Labs\", \"manual\"),\n]\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"pyro\", u\"Pyro Documentation\", [author], 1)]\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"Pyro\",\n u\"Pyro Documentation\",\n author,\n \"Pyro\",\n \"Deep Universal Probabilistic Programming.\",\n \"Miscellaneous\",\n ),\n]\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"torch\": (\"https://pytorch.org/docs/master/\", None),\n \"funsor\": (\"http://funsor.pyro.ai/en/stable/\", None),\n \"opt_einsum\": (\"https://optimized-einsum.readthedocs.io/en/stable/\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference/\", None),\n \"Bio\": (\"https://biopython.org/docs/latest/api/\", None),\n \"horovod\": (\"https://horovod.readthedocs.io/en/stable/\", None),\n \"graphviz\": (\"https://graphviz.readthedocs.io/en/stable/\", None),\n}\n\n# document class constructors (__init__ methods):\n\"\"\" comment out this functionality for now;\ndef skip(app, what, name, obj, skip, options):\n if name == \"__init__\":\n return False\n return skip\n\"\"\"\n\n\ndef setup(app):\n app.add_css_file(\"css/pyro.css\")\n\n\n# app.connect(\"autodoc-skip-member\", skip)\n\n\n# @jpchen's hack to get rtd builder to install latest pytorch\n# See similar line in the install section of .travis.yml\nif \"READTHEDOCS\" in os.environ:\n os.system(\"pip install numpy\")\n os.system(\n \"pip install torch==1.11.0+cpu torchvision==0.12.0+cpu \"\n \"-f https://download.pytorch.org/whl/torch_stable.html\"\n )\n", "path": "docs/source/conf.py"}], "after_files": [{"content": "# Copyright (c) 2017-2019 Uber Technologies, Inc.\n# SPDX-License-Identifier: Apache-2.0\n\nimport os\nimport sys\n\nimport sphinx_rtd_theme\n\n# import pkg_resources\n\n# -*- coding: utf-8 -*-\n#\n# Pyro documentation build configuration file, created by\n# sphinx-quickstart on Thu Jun 15 17:16:14 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nsys.path.insert(0, os.path.abspath(\"../..\"))\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.intersphinx\", #\n \"sphinx.ext.todo\", #\n \"sphinx.ext.mathjax\", #\n \"sphinx.ext.ifconfig\", #\n \"sphinx.ext.viewcode\", #\n \"sphinx.ext.githubpages\", #\n \"sphinx.ext.graphviz\", #\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.doctest\",\n 'sphinx.ext.napoleon',\n]\n\n# Disable documentation inheritance so as to avoid inheriting\n# docstrings in a different format, e.g. when the parent class\n# is a PyTorch class.\n\nautodoc_inherit_docstrings = False\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = \".rst\"\n\n# The master toctree document.\nmaster_doc = \"index\"\n\n# General information about the project.\nproject = u\"Pyro\"\ncopyright = u\"2017-2018, Uber Technologies, Inc\"\nauthor = u\"Uber AI Labs\"\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n\nversion = \"\"\n\nif \"READTHEDOCS\" not in os.environ:\n # if developing locally, use pyro.__version__ as version\n from pyro import __version__ # noqaE402\n\n version = __version__\n\n# release version\nrelease = version\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = \"en\"\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = []\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = \"sphinx\"\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n\n# do not prepend module name to functions\nadd_module_names = False\n\n# -- Options for HTML output ----------------------------------------------\n\n# logo\nhtml_logo = \"_static/img/pyro_logo_wide.png\"\n\n# logo\nhtml_favicon = \"_static/img/favicon/favicon.ico\"\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\nhtml_theme_path = [sphinx_rtd_theme.get_html_theme_path()]\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n\nhtml_theme_options = {\n \"navigation_depth\": 3,\n \"logo_only\": True,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"_static\"]\nhtml_style = \"css/pyro.css\"\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = \"Pyrodoc\"\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, \"Pyro.tex\", u\"Pyro Documentation\", u\"Uber AI Labs\", \"manual\"),\n]\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [(master_doc, \"pyro\", u\"Pyro Documentation\", [author], 1)]\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n \"Pyro\",\n u\"Pyro Documentation\",\n author,\n \"Pyro\",\n \"Deep Universal Probabilistic Programming.\",\n \"Miscellaneous\",\n ),\n]\n\n# Example configuration for intersphinx: refer to the Python standard library.\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3/\", None),\n \"torch\": (\"https://pytorch.org/docs/master/\", None),\n \"funsor\": (\"http://funsor.pyro.ai/en/stable/\", None),\n \"opt_einsum\": (\"https://optimized-einsum.readthedocs.io/en/stable/\", None),\n \"scipy\": (\"https://docs.scipy.org/doc/scipy/reference/\", None),\n \"Bio\": (\"https://biopython.org/docs/latest/api/\", None),\n \"horovod\": (\"https://horovod.readthedocs.io/en/stable/\", None),\n \"graphviz\": (\"https://graphviz.readthedocs.io/en/stable/\", None),\n}\n\n# document class constructors (__init__ methods):\n\"\"\" comment out this functionality for now;\ndef skip(app, what, name, obj, skip, options):\n if name == \"__init__\":\n return False\n return skip\n\"\"\"\n\n\ndef setup(app):\n app.add_css_file(\"css/pyro.css\")\n\n\n# app.connect(\"autodoc-skip-member\", skip)\n\n\n# @jpchen's hack to get rtd builder to install latest pytorch\n# See similar line in the install section of .travis.yml\nif \"READTHEDOCS\" in os.environ:\n os.system(\"pip install numpy\")\n os.system(\n \"pip install torch==1.11.0+cpu torchvision==0.12.0+cpu \"\n \"-f https://download.pytorch.org/whl/torch_stable.html\"\n )\n", "path": "docs/source/conf.py"}]}
| 2,788 | 173 |
gh_patches_debug_8174 | rasdani/github-patches | git_diff | pwndbg__pwndbg-1872 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
fails to load on startup with "NameError: name 'CS_ARCH_RISCV' is not defined"
### Description
I just updated pwndbg and ran setup.sh. Afterwards gdb just fails to load pwndbg.
```text
GNU gdb (Ubuntu 12.0.90-0ubuntu1) 12.0.90
Copyright (C) 2022 Free Software Foundation, Inc.
License GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>
This is free software: you are free to change and redistribute it.
There is NO WARRANTY, to the extent permitted by law.
Type "show copying" and "show warranty" for details.
This GDB was configured as "x86_64-linux-gnu".
Type "show configuration" for configuration details.
For bug reporting instructions, please see:
<https://www.gnu.org/software/gdb/bugs/>.
Find the GDB manual and other documentation resources online at:
<http://www.gnu.org/software/gdb/documentation/>.
For help, type "help".
Type "apropos word" to search for commands related to "word".
Traceback (most recent call last):
File "/home/username/repositories/hacking/pwndbg/gdbinit.py", line 71, in <module>
import pwndbg # noqa: F401
File "/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/__init__.py", line 9, in <module>
import pwndbg.commands
File "/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/commands/__init__.py", line 17, in <module>
from pwndbg.heap.ptmalloc import DebugSymsHeap
File "/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/heap/ptmalloc.py", line 19, in <module>
import pwndbg.disasm
File "/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/disasm/__init__.py", line 40, in <module>
"rv32": CS_ARCH_RISCV,
NameError: name 'CS_ARCH_RISCV' is not defined. Did you mean: 'CS_ARCH_MIPS'?
```
### Steps to reproduce
1. run setup.sh
2. run gdb
3. ???
4. no profit!
### My setup
OS: Ubuntu Mate 22.04
pwndbg: 0fbe6cf47f9893af482626aa6fb30ea68f0c30d3
gdb: GNU gdb (Ubuntu 12.0.90-0ubuntu1) 12.0.90
~/.gdbinit
```text
# prevent history from being all over the filesystem
set history save on
set history filename ~/.gdb_history
set history size 8192
set history remove-duplicates unlimited
# leave history expansion off (! character)
# prevent brain from exploding
set disassembly-flavor intel
# show registers, stack and instruction pointer when stopping
# not required with gef/pwndbg
# define hook-stop
# info registers rax rbx rcx rdx rsi rdi rbp rsp rip eflags
# x /64wx $rsp
# x /3i $rip
# end
# load extensions
# source ~/repositories/hacking/peda/peda.py
source ~/repositories/hacking/exploitable/exploitable/exploitable.py
# source ~/repositories/hacking/gef/gef.py
source ~/repositories/hacking/pwndbg/gdbinit.py
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `gdbinit.py`
Content:
```
1 from __future__ import annotations
2
3 import cProfile
4 import glob
5 import locale
6 import os
7 import site
8 import sys
9 import time
10 from glob import glob
11 from os import environ
12 from os import path
13
14 _profiler = cProfile.Profile()
15
16 _start_time = None
17 if environ.get("PWNDBG_PROFILE") == "1":
18 _start_time = time.time()
19 _profiler.enable()
20
21 # Get virtualenv's site-packages path
22 venv_path = os.environ.get("PWNDBG_VENV_PATH")
23 if venv_path == "PWNDBG_PLEASE_SKIP_VENV":
24 pass
25 else:
26 directory, file = path.split(__file__)
27 directory = path.expanduser(directory)
28 directory = path.abspath(directory)
29
30 if not venv_path:
31 venv_path = os.path.join(directory, ".venv")
32
33 if not os.path.exists(venv_path):
34 print(f"Cannot find Pwndbg virtualenv directory: {venv_path}: please re-run setup.sh")
35 sys.exit(1)
36
37 site_pkgs_path = glob(os.path.join(venv_path, "lib/*/site-packages"))[0]
38
39 # add virtualenv's site-packages to sys.path and run .pth files
40 site.addsitedir(site_pkgs_path)
41
42 # remove existing, system-level site-packages from sys.path
43 for site_packages in site.getsitepackages():
44 if site_packages in sys.path:
45 sys.path.remove(site_packages)
46
47 # Set virtualenv's bin path (needed for utility tools like ropper, pwntools etc)
48 bin_path = os.path.join(venv_path, "bin")
49 os.environ["PATH"] = bin_path + os.pathsep + os.environ.get("PATH")
50
51 # Add gdb-pt-dump directory to sys.path so it can be imported
52 gdbpt = path.join(directory, "gdb-pt-dump")
53 sys.path.append(directory)
54 sys.path.append(gdbpt)
55
56 # warn if the user has different encoding than utf-8
57 encoding = locale.getpreferredencoding()
58
59 if encoding != "UTF-8":
60 print("******")
61 print(f"Your encoding ({encoding}) is different than UTF-8. pwndbg might not work properly.")
62 print("You might try launching GDB with:")
63 print(" LC_CTYPE=C.UTF-8 gdb")
64 print(
65 "If that does not work, make sure that en_US.UTF-8 is uncommented in /etc/locale.gen and that you called `locale-gen` command"
66 )
67 print("******")
68
69 environ["PWNLIB_NOTERM"] = "1"
70
71 import pwndbg # noqa: F401
72 import pwndbg.profiling
73
74 pwndbg.profiling.init(_profiler, _start_time)
75 if environ.get("PWNDBG_PROFILE") == "1":
76 pwndbg.profiling.profiler.stop("pwndbg-load.pstats")
77 pwndbg.profiling.profiler.start()
78
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/gdbinit.py b/gdbinit.py
--- a/gdbinit.py
+++ b/gdbinit.py
@@ -50,8 +50,13 @@
# Add gdb-pt-dump directory to sys.path so it can be imported
gdbpt = path.join(directory, "gdb-pt-dump")
- sys.path.append(directory)
- sys.path.append(gdbpt)
+ sys.path.insert(0, directory)
+ sys.path.insert(1, gdbpt)
+
+ # Push virtualenv's site-packages to the front
+ sys.path.remove(site_pkgs_path)
+ sys.path.insert(2, site_pkgs_path)
+
# warn if the user has different encoding than utf-8
encoding = locale.getpreferredencoding()
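Editor's note on the patch above: the NameError appears to be an import-precedence problem — an older system-wide capstone build (one without `CS_ARCH_RISCV`) can shadow the virtualenv's copy when the plugin's entries sit at the end of `sys.path`. A minimal sketch of that ordering idea follows; the paths are hypothetical and are not taken from the repository.

```python
import sys

# Hypothetical paths, used only to illustrate the ordering; they do not need
# to exist for sys.path manipulation to work.
plugin_dir = "/home/user/pwndbg"
venv_site_pkgs = "/home/user/pwndbg/.venv/lib/python3.10/site-packages"
sys.path.append(venv_site_pkgs)  # stand-in for the earlier site.addsitedir() call

# sys.path is searched front to back, so a stale system-wide copy of a package
# that sits earlier in the list wins over the virtualenv copy appended at the end.
sys.path.insert(0, plugin_dir)

# Move the virtualenv's site-packages toward the front as well, as the patch does.
sys.path.remove(venv_site_pkgs)
sys.path.insert(1, venv_site_pkgs)

print(sys.path[:2])  # the plugin dir and its virtualenv packages now win lookups
```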
|
{"golden_diff": "diff --git a/gdbinit.py b/gdbinit.py\n--- a/gdbinit.py\n+++ b/gdbinit.py\n@@ -50,8 +50,13 @@\n \n # Add gdb-pt-dump directory to sys.path so it can be imported\n gdbpt = path.join(directory, \"gdb-pt-dump\")\n- sys.path.append(directory)\n- sys.path.append(gdbpt)\n+ sys.path.insert(0, directory)\n+ sys.path.insert(1, gdbpt)\n+\n+ # Push virtualenv's site-packages to the front\n+ sys.path.remove(site_pkgs_path)\n+ sys.path.insert(2, site_pkgs_path)\n+\n \n # warn if the user has different encoding than utf-8\n encoding = locale.getpreferredencoding()\n", "issue": "fails to load on startup with \"NameError: name 'CS_ARCH_RISCV' is not defined\"\n### Description\r\nI just updated pwndbg and ran setup.sh. Afterwards gdb just fails to load pwndbg.\r\n\r\n```text\r\nGNU gdb (Ubuntu 12.0.90-0ubuntu1) 12.0.90\r\nCopyright (C) 2022 Free Software Foundation, Inc.\r\nLicense GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\r\nThis is free software: you are free to change and redistribute it.\r\nThere is NO WARRANTY, to the extent permitted by law.\r\nType \"show copying\" and \"show warranty\" for details.\r\nThis GDB was configured as \"x86_64-linux-gnu\".\r\nType \"show configuration\" for configuration details.\r\nFor bug reporting instructions, please see:\r\n<https://www.gnu.org/software/gdb/bugs/>.\r\nFind the GDB manual and other documentation resources online at:\r\n <http://www.gnu.org/software/gdb/documentation/>.\r\n\r\nFor help, type \"help\".\r\nType \"apropos word\" to search for commands related to \"word\".\r\nTraceback (most recent call last):\r\n File \"/home/username/repositories/hacking/pwndbg/gdbinit.py\", line 71, in <module>\r\n import pwndbg # noqa: F401\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/__init__.py\", line 9, in <module>\r\n import pwndbg.commands\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/commands/__init__.py\", line 17, in <module>\r\n from pwndbg.heap.ptmalloc import DebugSymsHeap\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/heap/ptmalloc.py\", line 19, in <module>\r\n import pwndbg.disasm\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/disasm/__init__.py\", line 40, in <module>\r\n \"rv32\": CS_ARCH_RISCV,\r\nNameError: name 'CS_ARCH_RISCV' is not defined. Did you mean: 'CS_ARCH_MIPS'?\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n1. run setup.sh\r\n2. run gdb\r\n3. ???\r\n4. no profit!\r\n\r\n### My setup\r\n\r\nOS: Ubuntu Mate 22.04\r\npwndbg: 0fbe6cf47f9893af482626aa6fb30ea68f0c30d3\r\ngdb: GNU gdb (Ubuntu 12.0.90-0ubuntu1) 12.0.90\r\n\r\n~/.gdbinit\r\n```text\r\n# prevent history from being all over the filesystem\r\nset history save on\r\nset history filename ~/.gdb_history\r\nset history size 8192\r\nset history remove-duplicates unlimited\r\n# leave history expansion off (! 
character)\r\n\r\n# prevent brain from exploding\r\nset disassembly-flavor intel\r\n\r\n# show registers, stack and instruction pointer when stopping\r\n\r\n# not required with gef/pwndbg\r\n# define hook-stop\r\n# info registers rax rbx rcx rdx rsi rdi rbp rsp rip eflags\r\n# x /64wx $rsp\r\n# x /3i $rip\r\n# end\r\n\r\n# load extensions\r\n# source ~/repositories/hacking/peda/peda.py\r\nsource ~/repositories/hacking/exploitable/exploitable/exploitable.py\r\n# source ~/repositories/hacking/gef/gef.py\r\nsource ~/repositories/hacking/pwndbg/gdbinit.py\r\n```\nfails to load on startup with \"NameError: name 'CS_ARCH_RISCV' is not defined\"\n### Description\r\nI just updated pwndbg and ran setup.sh. Afterwards gdb just fails to load pwndbg.\r\n\r\n```text\r\nGNU gdb (Ubuntu 12.0.90-0ubuntu1) 12.0.90\r\nCopyright (C) 2022 Free Software Foundation, Inc.\r\nLicense GPLv3+: GNU GPL version 3 or later <http://gnu.org/licenses/gpl.html>\r\nThis is free software: you are free to change and redistribute it.\r\nThere is NO WARRANTY, to the extent permitted by law.\r\nType \"show copying\" and \"show warranty\" for details.\r\nThis GDB was configured as \"x86_64-linux-gnu\".\r\nType \"show configuration\" for configuration details.\r\nFor bug reporting instructions, please see:\r\n<https://www.gnu.org/software/gdb/bugs/>.\r\nFind the GDB manual and other documentation resources online at:\r\n <http://www.gnu.org/software/gdb/documentation/>.\r\n\r\nFor help, type \"help\".\r\nType \"apropos word\" to search for commands related to \"word\".\r\nTraceback (most recent call last):\r\n File \"/home/username/repositories/hacking/pwndbg/gdbinit.py\", line 71, in <module>\r\n import pwndbg # noqa: F401\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/__init__.py\", line 9, in <module>\r\n import pwndbg.commands\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/commands/__init__.py\", line 17, in <module>\r\n from pwndbg.heap.ptmalloc import DebugSymsHeap\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/heap/ptmalloc.py\", line 19, in <module>\r\n import pwndbg.disasm\r\n File \"/home/username/repositories/hacking/pwndbg/.venv/lib/python3.10/site-packages/pwndbg/disasm/__init__.py\", line 40, in <module>\r\n \"rv32\": CS_ARCH_RISCV,\r\nNameError: name 'CS_ARCH_RISCV' is not defined. Did you mean: 'CS_ARCH_MIPS'?\r\n```\r\n\r\n### Steps to reproduce\r\n\r\n1. run setup.sh\r\n2. run gdb\r\n3. ???\r\n4. no profit!\r\n\r\n### My setup\r\n\r\nOS: Ubuntu Mate 22.04\r\npwndbg: 0fbe6cf47f9893af482626aa6fb30ea68f0c30d3\r\ngdb: GNU gdb (Ubuntu 12.0.90-0ubuntu1) 12.0.90\r\n\r\n~/.gdbinit\r\n```text\r\n# prevent history from being all over the filesystem\r\nset history save on\r\nset history filename ~/.gdb_history\r\nset history size 8192\r\nset history remove-duplicates unlimited\r\n# leave history expansion off (! 
character)\r\n\r\n# prevent brain from exploding\r\nset disassembly-flavor intel\r\n\r\n# show registers, stack and instruction pointer when stopping\r\n\r\n# not required with gef/pwndbg\r\n# define hook-stop\r\n# info registers rax rbx rcx rdx rsi rdi rbp rsp rip eflags\r\n# x /64wx $rsp\r\n# x /3i $rip\r\n# end\r\n\r\n# load extensions\r\n# source ~/repositories/hacking/peda/peda.py\r\nsource ~/repositories/hacking/exploitable/exploitable/exploitable.py\r\n# source ~/repositories/hacking/gef/gef.py\r\nsource ~/repositories/hacking/pwndbg/gdbinit.py\r\n```\n", "before_files": [{"content": "from __future__ import annotations\n\nimport cProfile\nimport glob\nimport locale\nimport os\nimport site\nimport sys\nimport time\nfrom glob import glob\nfrom os import environ\nfrom os import path\n\n_profiler = cProfile.Profile()\n\n_start_time = None\nif environ.get(\"PWNDBG_PROFILE\") == \"1\":\n _start_time = time.time()\n _profiler.enable()\n\n# Get virtualenv's site-packages path\nvenv_path = os.environ.get(\"PWNDBG_VENV_PATH\")\nif venv_path == \"PWNDBG_PLEASE_SKIP_VENV\":\n pass\nelse:\n directory, file = path.split(__file__)\n directory = path.expanduser(directory)\n directory = path.abspath(directory)\n\n if not venv_path:\n venv_path = os.path.join(directory, \".venv\")\n\n if not os.path.exists(venv_path):\n print(f\"Cannot find Pwndbg virtualenv directory: {venv_path}: please re-run setup.sh\")\n sys.exit(1)\n\n site_pkgs_path = glob(os.path.join(venv_path, \"lib/*/site-packages\"))[0]\n\n # add virtualenv's site-packages to sys.path and run .pth files\n site.addsitedir(site_pkgs_path)\n\n # remove existing, system-level site-packages from sys.path\n for site_packages in site.getsitepackages():\n if site_packages in sys.path:\n sys.path.remove(site_packages)\n\n # Set virtualenv's bin path (needed for utility tools like ropper, pwntools etc)\n bin_path = os.path.join(venv_path, \"bin\")\n os.environ[\"PATH\"] = bin_path + os.pathsep + os.environ.get(\"PATH\")\n\n # Add gdb-pt-dump directory to sys.path so it can be imported\n gdbpt = path.join(directory, \"gdb-pt-dump\")\n sys.path.append(directory)\n sys.path.append(gdbpt)\n\n# warn if the user has different encoding than utf-8\nencoding = locale.getpreferredencoding()\n\nif encoding != \"UTF-8\":\n print(\"******\")\n print(f\"Your encoding ({encoding}) is different than UTF-8. 
pwndbg might not work properly.\")\n print(\"You might try launching GDB with:\")\n print(\" LC_CTYPE=C.UTF-8 gdb\")\n print(\n \"If that does not work, make sure that en_US.UTF-8 is uncommented in /etc/locale.gen and that you called `locale-gen` command\"\n )\n print(\"******\")\n\nenviron[\"PWNLIB_NOTERM\"] = \"1\"\n\nimport pwndbg # noqa: F401\nimport pwndbg.profiling\n\npwndbg.profiling.init(_profiler, _start_time)\nif environ.get(\"PWNDBG_PROFILE\") == \"1\":\n pwndbg.profiling.profiler.stop(\"pwndbg-load.pstats\")\n pwndbg.profiling.profiler.start()\n", "path": "gdbinit.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport cProfile\nimport glob\nimport locale\nimport os\nimport site\nimport sys\nimport time\nfrom glob import glob\nfrom os import environ\nfrom os import path\n\n_profiler = cProfile.Profile()\n\n_start_time = None\nif environ.get(\"PWNDBG_PROFILE\") == \"1\":\n _start_time = time.time()\n _profiler.enable()\n\n# Get virtualenv's site-packages path\nvenv_path = os.environ.get(\"PWNDBG_VENV_PATH\")\nif venv_path == \"PWNDBG_PLEASE_SKIP_VENV\":\n pass\nelse:\n directory, file = path.split(__file__)\n directory = path.expanduser(directory)\n directory = path.abspath(directory)\n\n if not venv_path:\n venv_path = os.path.join(directory, \".venv\")\n\n if not os.path.exists(venv_path):\n print(f\"Cannot find Pwndbg virtualenv directory: {venv_path}: please re-run setup.sh\")\n sys.exit(1)\n\n site_pkgs_path = glob(os.path.join(venv_path, \"lib/*/site-packages\"))[0]\n\n # add virtualenv's site-packages to sys.path and run .pth files\n site.addsitedir(site_pkgs_path)\n\n # remove existing, system-level site-packages from sys.path\n for site_packages in site.getsitepackages():\n if site_packages in sys.path:\n sys.path.remove(site_packages)\n\n # Set virtualenv's bin path (needed for utility tools like ropper, pwntools etc)\n bin_path = os.path.join(venv_path, \"bin\")\n os.environ[\"PATH\"] = bin_path + os.pathsep + os.environ.get(\"PATH\")\n\n # Add gdb-pt-dump directory to sys.path so it can be imported\n gdbpt = path.join(directory, \"gdb-pt-dump\")\n sys.path.insert(0, directory)\n sys.path.insert(1, gdbpt)\n\n # Push virtualenv's site-packages to the front\n sys.path.remove(site_pkgs_path)\n sys.path.insert(2, site_pkgs_path)\n\n\n# warn if the user has different encoding than utf-8\nencoding = locale.getpreferredencoding()\n\nif encoding != \"UTF-8\":\n print(\"******\")\n print(f\"Your encoding ({encoding}) is different than UTF-8. pwndbg might not work properly.\")\n print(\"You might try launching GDB with:\")\n print(\" LC_CTYPE=C.UTF-8 gdb\")\n print(\n \"If that does not work, make sure that en_US.UTF-8 is uncommented in /etc/locale.gen and that you called `locale-gen` command\"\n )\n print(\"******\")\n\nenviron[\"PWNLIB_NOTERM\"] = \"1\"\n\nimport pwndbg # noqa: F401\nimport pwndbg.profiling\n\npwndbg.profiling.init(_profiler, _start_time)\nif environ.get(\"PWNDBG_PROFILE\") == \"1\":\n pwndbg.profiling.profiler.stop(\"pwndbg-load.pstats\")\n pwndbg.profiling.profiler.start()\n", "path": "gdbinit.py"}]}
| 2,721 | 170 |
gh_patches_debug_4848
|
rasdani/github-patches
|
git_diff
|
enthought__chaco-728
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
TypeError when running stock prices examples
This error was discovered when running the `examples/demo/financial/stock_prices.py` example.
In order to reproduce, simply load the example and start zooming out. This was observed on Python 3.6 with the wx toolkit.
```
>python stock_prices.py
c:\users\rporuri\work\github\ets\enable\kiva\agg\plat_support.py:188: wxPyDeprecationWarning: Call to deprecated item. Use GetHandle instead.
self.draw(window_dc.GetHDC(), x, y, width, height)
Exception occurred in traits notification handler for event object: TraitChangeEvent(object=<chaco.data_range_1d.DataRange1D object at 0x000001C06339C518>, name='updated', old=<undefined>, new=(1614674109.0105073, 1657874109.0105073))
Traceback (most recent call last):
File "C:\Users\rporuri\.edm\envs\enable-test-3.6-wx\lib\site-packages\traits\observation\_trait_event_notifier.py", line 122, in __call__
self.dispatcher(handler, event)
File "C:\Users\rporuri\.edm\envs\enable-test-3.6-wx\lib\site-packages\traits\observation\observe.py", line 26, in dispatch_same
handler(event)
File "stock_prices.py", line 147, in _plot_range_handler
low, high = event
TypeError: 'TraitChangeEvent' object is not iterable
Exception occurred in traits notification handler for event object: TraitChangeEvent(object=<chaco.data_range_1d.DataRange1D object at 0x000001C06339C518>, name='updated', old=<undefined>, new=(1593074109.0105073, 1679474109.0105073))
Traceback (most recent call last):
File "C:\Users\rporuri\.edm\envs\enable-test-3.6-wx\lib\site-packages\traits\observation\_trait_event_notifier.py", line 122, in __call__
self.dispatcher(handler, event)
File "C:\Users\rporuri\.edm\envs\enable-test-3.6-wx\lib\site-packages\traits\observation\observe.py", line 26, in dispatch_same
handler(event)
File "stock_prices.py", line 147, in _plot_range_handler
low, high = event
TypeError: 'TraitChangeEvent' object is not iterable
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `examples/demo/financial/stock_prices.py`
Content:
```
1 """
2 Implementation of a standard financial plot visualization using Chaco
3 renderers and scales.
4
5 In the main price plot area, mouse wheel zooms and mouse drag pans (if
6 the plot is not at the edge of the time series data). In the bottom
7 overview plot area, right-click-drag selects a range of times to display
8 on the top two plots. Once a region is selected, it can be moved
9 around by left-dragging or resized by left-dragging one of its
10 edges.
11 """
12
13 # Major library imports
14 from numpy import abs, cumprod, linspace, random
15 import time
16
17 from enable.example_support import DemoFrame, demo_main
18
19 # Enthought library imports
20 from enable.api import Window
21
22 # Chaco imports
23 from chaco.api import (
24 ArrayDataSource,
25 BarPlot,
26 DataRange1D,
27 LinePlot,
28 LinearMapper,
29 VPlotContainer,
30 PlotAxis,
31 FilledLinePlot,
32 add_default_grids,
33 )
34 from chaco.tools.api import (
35 PanTool,
36 ZoomTool,
37 RangeSelection,
38 RangeSelectionOverlay,
39 )
40
41 from chaco.scales.api import CalendarScaleSystem
42 from chaco.scales_tick_generator import ScalesTickGenerator
43
44
45 def create_dates(numpoints, units="days"):
46 """Returns **numpoints** number of dates that evenly bracket the current
47 date and time. **units** should be one of "weeks", "days", "hours"
48 "minutes", or "seconds".
49 """
50 units_map = {
51 "weeks": 7 * 24 * 3600,
52 "days": 24 * 3600,
53 "hours": 3600,
54 "minutes": 60,
55 "seconds": 1,
56 }
57 now = time.time()
58 dt = units_map[units]
59 dates = linspace(now, now + numpoints * dt, numpoints)
60 return dates
61
62
63 class PlotFrame(DemoFrame):
64 def _create_price_plots(self, times, prices, mini_height=75):
65 """Creates the two plots of prices and returns them. One of the
66 plots can be zoomed and panned, and the other plot (smaller) always
67 shows the full data.
68
69 *dates* and *prices* are two data sources.
70 """
71
72 # Create the price plot
73 price_plot = FilledLinePlot(
74 index=times,
75 value=prices,
76 index_mapper=LinearMapper(range=DataRange1D(times)),
77 value_mapper=LinearMapper(range=DataRange1D(prices)),
78 edge_color="blue",
79 face_color="paleturquoise",
80 bgcolor="white",
81 border_visible=True,
82 )
83
84 # Add pan and zoom
85 price_plot.tools.append(
86 PanTool(
87 price_plot,
88 constrain=True,
89 constrain_direction="x",
90 restrict_to_data=True,
91 )
92 )
93 price_plot.overlays.append(
94 ZoomTool(
95 price_plot,
96 drag_button="right",
97 always_on=True,
98 tool_mode="range",
99 axis="index",
100 max_zoom_out_factor=1.0,
101 x_min_zoom_factor=float(1e-3),
102 )
103 )
104
105 # Create the miniplot
106 miniplot = LinePlot(
107 index=times,
108 value=prices,
109 index_mapper=LinearMapper(range=DataRange1D(times)),
110 value_mapper=LinearMapper(range=DataRange1D(prices)),
111 color="black",
112 border_visible=True,
113 bgcolor="white",
114 height=mini_height,
115 resizable="h",
116 )
117
118 # Add a range overlay to the miniplot that is hooked up to the range
119 # of the main price_plot
120 range_tool = RangeSelection(miniplot)
121 miniplot.tools.append(range_tool)
122 range_overlay = RangeSelectionOverlay(
123 miniplot, metadata_name="selections"
124 )
125 miniplot.overlays.append(range_overlay)
126 range_tool.observe(self._range_selection_handler, "selection")
127
128 # Attach a handler that sets the tool when the plot's index range changes
129 self.range_tool = range_tool
130 price_plot.index_range.observe(self._plot_range_handler, "updated")
131
132 return price_plot, miniplot
133
134 def _range_selection_handler(self, event):
135 range_selection_event = event.new
136 # The event obj should be a tuple (low, high) in data space
137 if range_selection_event is not None:
138 low, high = range_selection_event
139 self.price_plot.index_range.low = low
140 self.price_plot.index_range.high = high
141 else:
142 self.price_plot.index_range.set_bounds("auto", "auto")
143
144 def _plot_range_handler(self, event):
145 plot_range_event = event.new
146 if plot_range_event is not None:
147 low, high = event
148 if "auto" not in (low, high):
149 self.range_tool.selection = (low, high)
150
151 def _create_vol_plot(self, times, volumes, height=100):
152 "Creates and returns the volume plot"
153 index_range = self.price_plot.index_range
154 vol_plot = BarPlot(
155 index=times,
156 value=volumes,
157 index_mapper=LinearMapper(range=index_range),
158 value_mapper=LinearMapper(range=DataRange1D(volumes)),
159 line_color="transparent",
160 fill_color="black",
161 bar_width=1.0,
162 bar_width_type="screen",
163 antialias=False,
164 height=100,
165 resizable="h",
166 bgcolor="white",
167 border_visible=True,
168 )
169 vol_plot.tools.append(
170 PanTool(vol_plot, constrain=True, constrain_direction="x")
171 )
172 return vol_plot
173
174 def _create_component(self):
175
176 # Create the data and datasource objects
177 # In order for the date axis to work, the index data points need to
178 # be in units of seconds since the epoch. This is because we are using
179 # the CalendarScaleSystem, whose formatters interpret the numerical values
180 # as seconds since the epoch.
181 numpoints = 500
182 index = create_dates(numpoints)
183 returns = random.lognormal(0.01, 0.1, size=numpoints)
184 price = 100.0 * cumprod(returns)
185 volume = abs(random.normal(1000.0, 1500.0, size=numpoints) + 2000.0)
186
187 time_ds = ArrayDataSource(index)
188 vol_ds = ArrayDataSource(volume, sort_order="none")
189 price_ds = ArrayDataSource(price, sort_order="none")
190
191 # Create the price plots
192 price_plot, mini_plot = self._create_price_plots(time_ds, price_ds)
193 price_plot.index_mapper.domain_limits = (index[0], index[-1])
194 self.price_plot = price_plot
195 self.mini_plot = mini_plot
196
197 # Create the volume plot
198 vol_plot = self._create_vol_plot(time_ds, vol_ds)
199 vol_plot.index_mapper.domain_limits = (index[0], index[-1])
200
201 # Set the plot's bottom axis to use the Scales ticking system
202 ticker = ScalesTickGenerator(scale=CalendarScaleSystem())
203 for plot in price_plot, mini_plot, vol_plot:
204 bottom_axis = PlotAxis(
205 plot, orientation="bottom", tick_generator=ticker
206 )
207 plot.overlays.append(bottom_axis)
208 plot.overlays.append(PlotAxis(plot, orientation="left"))
209 hgrid, vgrid = add_default_grids(plot)
210 vgrid.tick_generator = bottom_axis.tick_generator
211
212 container = VPlotContainer(
213 bgcolor="lightgray", spacing=40, padding=50, fill_padding=False
214 )
215 container.add(mini_plot, vol_plot, price_plot)
216
217 return container
218
219
220 if __name__ == "__main__":
221 # Save demo so that it doesn't get garbage collected when run within
222 # existing event loop (i.e. from ipython).
223 demo = demo_main(
224 PlotFrame, size=(800, 600), title="Stock price and volume"
225 )
226
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/examples/demo/financial/stock_prices.py b/examples/demo/financial/stock_prices.py
--- a/examples/demo/financial/stock_prices.py
+++ b/examples/demo/financial/stock_prices.py
@@ -144,7 +144,7 @@
def _plot_range_handler(self, event):
plot_range_event = event.new
if plot_range_event is not None:
- low, high = event
+ low, high = plot_range_event
if "auto" not in (low, high):
self.range_tool.selection = (low, high)
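Editor's note on the patch above: with the Traits `observe` API the handler receives a single `TraitChangeEvent` object, and the `(low, high)` tuple fired by the range's `updated` event rides on `event.new` — unpacking the event itself is what raised the `TypeError`. Below is a minimal, Chaco-independent sketch of the handler pattern, assuming the `traits` package (a Chaco dependency) is installed; the class and names are illustrative, not taken from Chaco.

```python
from traits.api import Event, HasTraits


class FakeRange(HasTraits):
    # Stand-in for chaco's DataRange1D: fires `updated` with a (low, high) tuple.
    updated = Event()


class Listener(HasTraits):
    def attach(self, rng):
        rng.observe(self._plot_range_handler, "updated")

    def _plot_range_handler(self, event):
        bounds = event.new           # the tuple payload lives on event.new
        if bounds is not None:
            low, high = bounds       # `low, high = event` is what raised TypeError
            print("new bounds:", low, high)


rng = FakeRange()
listener = Listener()
listener.attach(rng)
rng.updated = (0.0, 100.0)           # prints: new bounds: 0.0 100.0
```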
|
{"golden_diff": "diff --git a/examples/demo/financial/stock_prices.py b/examples/demo/financial/stock_prices.py\n--- a/examples/demo/financial/stock_prices.py\n+++ b/examples/demo/financial/stock_prices.py\n@@ -144,7 +144,7 @@\n def _plot_range_handler(self, event):\n plot_range_event = event.new\n if plot_range_event is not None:\n- low, high = event\n+ low, high = plot_range_event\n if \"auto\" not in (low, high):\n self.range_tool.selection = (low, high)\n", "issue": "TypeError when running stock prices examples\nThis error was discovered when running the `examples/demo/financial/stock_prices.py` example.\r\n\r\nIn order to reproduce, simply load the example and start zooming out. This was observed on python 3.6 with wx toolkit.\r\n\r\n```\r\n>python stock_prices.py\r\nc:\\users\\rporuri\\work\\github\\ets\\enable\\kiva\\agg\\plat_support.py:188: wxPyDeprecationWarning: Call to deprecated item. Use GetHandle instead.\r\n self.draw(window_dc.GetHDC(), x, y, width, height)\r\nException occurred in traits notification handler for event object: TraitChangeEvent(object=<chaco.data_range_1d.DataRange1D object at 0x000001C06339C518>, name='updated', old=<undefined>, new=(1614674109.0105073, 1657874109.0105073))\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\rporuri\\.edm\\envs\\enable-test-3.6-wx\\lib\\site-packages\\traits\\observation\\_trait_event_notifier.py\", line 122, in __call__\r\n self.dispatcher(handler, event)\r\n File \"C:\\Users\\rporuri\\.edm\\envs\\enable-test-3.6-wx\\lib\\site-packages\\traits\\observation\\observe.py\", line 26, in dispatch_same\r\n handler(event)\r\n File \"stock_prices.py\", line 147, in _plot_range_handler\r\n low, high = event\r\nTypeError: 'TraitChangeEvent' object is not iterable\r\nException occurred in traits notification handler for event object: TraitChangeEvent(object=<chaco.data_range_1d.DataRange1D object at 0x000001C06339C518>, name='updated', old=<undefined>, new=(1593074109.0105073, 1679474109.0105073))\r\nTraceback (most recent call last):\r\n File \"C:\\Users\\rporuri\\.edm\\envs\\enable-test-3.6-wx\\lib\\site-packages\\traits\\observation\\_trait_event_notifier.py\", line 122, in __call__\r\n self.dispatcher(handler, event)\r\n File \"C:\\Users\\rporuri\\.edm\\envs\\enable-test-3.6-wx\\lib\\site-packages\\traits\\observation\\observe.py\", line 26, in dispatch_same\r\n handler(event)\r\n File \"stock_prices.py\", line 147, in _plot_range_handler\r\n low, high = event\r\nTypeError: 'TraitChangeEvent' object is not iterable\r\n```\n", "before_files": [{"content": "\"\"\"\nImplementation of a standard financial plot visualization using Chaco\nrenderers and scales.\n\nIn the main price plot area, mouse wheel zooms and mouse drag pans (if\nthe plot is not at the edge of the time series data). In the bottom\noverview plot area, right-click-drag selects a range of times to display\non the top two plots. 
Once a region is selected, it can be moved\naround by left-dragging or resized by left-dragging one of its\nedges.\n\"\"\"\n\n# Major library imports\nfrom numpy import abs, cumprod, linspace, random\nimport time\n\nfrom enable.example_support import DemoFrame, demo_main\n\n# Enthought library imports\nfrom enable.api import Window\n\n# Chaco imports\nfrom chaco.api import (\n ArrayDataSource,\n BarPlot,\n DataRange1D,\n LinePlot,\n LinearMapper,\n VPlotContainer,\n PlotAxis,\n FilledLinePlot,\n add_default_grids,\n)\nfrom chaco.tools.api import (\n PanTool,\n ZoomTool,\n RangeSelection,\n RangeSelectionOverlay,\n)\n\nfrom chaco.scales.api import CalendarScaleSystem\nfrom chaco.scales_tick_generator import ScalesTickGenerator\n\n\ndef create_dates(numpoints, units=\"days\"):\n \"\"\"Returns **numpoints** number of dates that evenly bracket the current\n date and time. **units** should be one of \"weeks\", \"days\", \"hours\"\n \"minutes\", or \"seconds\".\n \"\"\"\n units_map = {\n \"weeks\": 7 * 24 * 3600,\n \"days\": 24 * 3600,\n \"hours\": 3600,\n \"minutes\": 60,\n \"seconds\": 1,\n }\n now = time.time()\n dt = units_map[units]\n dates = linspace(now, now + numpoints * dt, numpoints)\n return dates\n\n\nclass PlotFrame(DemoFrame):\n def _create_price_plots(self, times, prices, mini_height=75):\n \"\"\"Creates the two plots of prices and returns them. One of the\n plots can be zoomed and panned, and the other plot (smaller) always\n shows the full data.\n\n *dates* and *prices* are two data sources.\n \"\"\"\n\n # Create the price plot\n price_plot = FilledLinePlot(\n index=times,\n value=prices,\n index_mapper=LinearMapper(range=DataRange1D(times)),\n value_mapper=LinearMapper(range=DataRange1D(prices)),\n edge_color=\"blue\",\n face_color=\"paleturquoise\",\n bgcolor=\"white\",\n border_visible=True,\n )\n\n # Add pan and zoom\n price_plot.tools.append(\n PanTool(\n price_plot,\n constrain=True,\n constrain_direction=\"x\",\n restrict_to_data=True,\n )\n )\n price_plot.overlays.append(\n ZoomTool(\n price_plot,\n drag_button=\"right\",\n always_on=True,\n tool_mode=\"range\",\n axis=\"index\",\n max_zoom_out_factor=1.0,\n x_min_zoom_factor=float(1e-3),\n )\n )\n\n # Create the miniplot\n miniplot = LinePlot(\n index=times,\n value=prices,\n index_mapper=LinearMapper(range=DataRange1D(times)),\n value_mapper=LinearMapper(range=DataRange1D(prices)),\n color=\"black\",\n border_visible=True,\n bgcolor=\"white\",\n height=mini_height,\n resizable=\"h\",\n )\n\n # Add a range overlay to the miniplot that is hooked up to the range\n # of the main price_plot\n range_tool = RangeSelection(miniplot)\n miniplot.tools.append(range_tool)\n range_overlay = RangeSelectionOverlay(\n miniplot, metadata_name=\"selections\"\n )\n miniplot.overlays.append(range_overlay)\n range_tool.observe(self._range_selection_handler, \"selection\")\n\n # Attach a handler that sets the tool when the plot's index range changes\n self.range_tool = range_tool\n price_plot.index_range.observe(self._plot_range_handler, \"updated\")\n\n return price_plot, miniplot\n\n def _range_selection_handler(self, event):\n range_selection_event = event.new\n # The event obj should be a tuple (low, high) in data space\n if range_selection_event is not None:\n low, high = range_selection_event\n self.price_plot.index_range.low = low\n self.price_plot.index_range.high = high\n else:\n self.price_plot.index_range.set_bounds(\"auto\", \"auto\")\n\n def _plot_range_handler(self, event):\n plot_range_event = event.new\n if plot_range_event 
is not None:\n low, high = event\n if \"auto\" not in (low, high):\n self.range_tool.selection = (low, high)\n\n def _create_vol_plot(self, times, volumes, height=100):\n \"Creates and returns the volume plot\"\n index_range = self.price_plot.index_range\n vol_plot = BarPlot(\n index=times,\n value=volumes,\n index_mapper=LinearMapper(range=index_range),\n value_mapper=LinearMapper(range=DataRange1D(volumes)),\n line_color=\"transparent\",\n fill_color=\"black\",\n bar_width=1.0,\n bar_width_type=\"screen\",\n antialias=False,\n height=100,\n resizable=\"h\",\n bgcolor=\"white\",\n border_visible=True,\n )\n vol_plot.tools.append(\n PanTool(vol_plot, constrain=True, constrain_direction=\"x\")\n )\n return vol_plot\n\n def _create_component(self):\n\n # Create the data and datasource objects\n # In order for the date axis to work, the index data points need to\n # be in units of seconds since the epoch. This is because we are using\n # the CalendarScaleSystem, whose formatters interpret the numerical values\n # as seconds since the epoch.\n numpoints = 500\n index = create_dates(numpoints)\n returns = random.lognormal(0.01, 0.1, size=numpoints)\n price = 100.0 * cumprod(returns)\n volume = abs(random.normal(1000.0, 1500.0, size=numpoints) + 2000.0)\n\n time_ds = ArrayDataSource(index)\n vol_ds = ArrayDataSource(volume, sort_order=\"none\")\n price_ds = ArrayDataSource(price, sort_order=\"none\")\n\n # Create the price plots\n price_plot, mini_plot = self._create_price_plots(time_ds, price_ds)\n price_plot.index_mapper.domain_limits = (index[0], index[-1])\n self.price_plot = price_plot\n self.mini_plot = mini_plot\n\n # Create the volume plot\n vol_plot = self._create_vol_plot(time_ds, vol_ds)\n vol_plot.index_mapper.domain_limits = (index[0], index[-1])\n\n # Set the plot's bottom axis to use the Scales ticking system\n ticker = ScalesTickGenerator(scale=CalendarScaleSystem())\n for plot in price_plot, mini_plot, vol_plot:\n bottom_axis = PlotAxis(\n plot, orientation=\"bottom\", tick_generator=ticker\n )\n plot.overlays.append(bottom_axis)\n plot.overlays.append(PlotAxis(plot, orientation=\"left\"))\n hgrid, vgrid = add_default_grids(plot)\n vgrid.tick_generator = bottom_axis.tick_generator\n\n container = VPlotContainer(\n bgcolor=\"lightgray\", spacing=40, padding=50, fill_padding=False\n )\n container.add(mini_plot, vol_plot, price_plot)\n\n return container\n\n\nif __name__ == \"__main__\":\n # Save demo so that it doesn't get garbage collected when run within\n # existing event loop (i.e. from ipython).\n demo = demo_main(\n PlotFrame, size=(800, 600), title=\"Stock price and volume\"\n )\n", "path": "examples/demo/financial/stock_prices.py"}], "after_files": [{"content": "\"\"\"\nImplementation of a standard financial plot visualization using Chaco\nrenderers and scales.\n\nIn the main price plot area, mouse wheel zooms and mouse drag pans (if\nthe plot is not at the edge of the time series data). In the bottom\noverview plot area, right-click-drag selects a range of times to display\non the top two plots. 
Once a region is selected, it can be moved\naround by left-dragging or resized by left-dragging one of its\nedges.\n\"\"\"\n\n# Major library imports\nfrom numpy import abs, cumprod, linspace, random\nimport time\n\nfrom enable.example_support import DemoFrame, demo_main\n\n# Enthought library imports\nfrom enable.api import Window\n\n# Chaco imports\nfrom chaco.api import (\n ArrayDataSource,\n BarPlot,\n DataRange1D,\n LinePlot,\n LinearMapper,\n VPlotContainer,\n PlotAxis,\n FilledLinePlot,\n add_default_grids,\n)\nfrom chaco.tools.api import (\n PanTool,\n ZoomTool,\n RangeSelection,\n RangeSelectionOverlay,\n)\n\nfrom chaco.scales.api import CalendarScaleSystem\nfrom chaco.scales_tick_generator import ScalesTickGenerator\n\n\ndef create_dates(numpoints, units=\"days\"):\n \"\"\"Returns **numpoints** number of dates that evenly bracket the current\n date and time. **units** should be one of \"weeks\", \"days\", \"hours\"\n \"minutes\", or \"seconds\".\n \"\"\"\n units_map = {\n \"weeks\": 7 * 24 * 3600,\n \"days\": 24 * 3600,\n \"hours\": 3600,\n \"minutes\": 60,\n \"seconds\": 1,\n }\n now = time.time()\n dt = units_map[units]\n dates = linspace(now, now + numpoints * dt, numpoints)\n return dates\n\n\nclass PlotFrame(DemoFrame):\n def _create_price_plots(self, times, prices, mini_height=75):\n \"\"\"Creates the two plots of prices and returns them. One of the\n plots can be zoomed and panned, and the other plot (smaller) always\n shows the full data.\n\n *dates* and *prices* are two data sources.\n \"\"\"\n\n # Create the price plot\n price_plot = FilledLinePlot(\n index=times,\n value=prices,\n index_mapper=LinearMapper(range=DataRange1D(times)),\n value_mapper=LinearMapper(range=DataRange1D(prices)),\n edge_color=\"blue\",\n face_color=\"paleturquoise\",\n bgcolor=\"white\",\n border_visible=True,\n )\n\n # Add pan and zoom\n price_plot.tools.append(\n PanTool(\n price_plot,\n constrain=True,\n constrain_direction=\"x\",\n restrict_to_data=True,\n )\n )\n price_plot.overlays.append(\n ZoomTool(\n price_plot,\n drag_button=\"right\",\n always_on=True,\n tool_mode=\"range\",\n axis=\"index\",\n max_zoom_out_factor=1.0,\n x_min_zoom_factor=float(1e-3),\n )\n )\n\n # Create the miniplot\n miniplot = LinePlot(\n index=times,\n value=prices,\n index_mapper=LinearMapper(range=DataRange1D(times)),\n value_mapper=LinearMapper(range=DataRange1D(prices)),\n color=\"black\",\n border_visible=True,\n bgcolor=\"white\",\n height=mini_height,\n resizable=\"h\",\n )\n\n # Add a range overlay to the miniplot that is hooked up to the range\n # of the main price_plot\n range_tool = RangeSelection(miniplot)\n miniplot.tools.append(range_tool)\n range_overlay = RangeSelectionOverlay(\n miniplot, metadata_name=\"selections\"\n )\n miniplot.overlays.append(range_overlay)\n range_tool.observe(self._range_selection_handler, \"selection\")\n\n # Attach a handler that sets the tool when the plot's index range changes\n self.range_tool = range_tool\n price_plot.index_range.observe(self._plot_range_handler, \"updated\")\n\n return price_plot, miniplot\n\n def _range_selection_handler(self, event):\n range_selection_event = event.new\n # The event obj should be a tuple (low, high) in data space\n if range_selection_event is not None:\n low, high = range_selection_event\n self.price_plot.index_range.low = low\n self.price_plot.index_range.high = high\n else:\n self.price_plot.index_range.set_bounds(\"auto\", \"auto\")\n\n def _plot_range_handler(self, event):\n plot_range_event = event.new\n if plot_range_event 
is not None:\n low, high = plot_range_event\n if \"auto\" not in (low, high):\n self.range_tool.selection = (low, high)\n\n def _create_vol_plot(self, times, volumes, height=100):\n \"Creates and returns the volume plot\"\n index_range = self.price_plot.index_range\n vol_plot = BarPlot(\n index=times,\n value=volumes,\n index_mapper=LinearMapper(range=index_range),\n value_mapper=LinearMapper(range=DataRange1D(volumes)),\n line_color=\"transparent\",\n fill_color=\"black\",\n bar_width=1.0,\n bar_width_type=\"screen\",\n antialias=False,\n height=100,\n resizable=\"h\",\n bgcolor=\"white\",\n border_visible=True,\n )\n vol_plot.tools.append(\n PanTool(vol_plot, constrain=True, constrain_direction=\"x\")\n )\n return vol_plot\n\n def _create_component(self):\n\n # Create the data and datasource objects\n # In order for the date axis to work, the index data points need to\n # be in units of seconds since the epoch. This is because we are using\n # the CalendarScaleSystem, whose formatters interpret the numerical values\n # as seconds since the epoch.\n numpoints = 500\n index = create_dates(numpoints)\n returns = random.lognormal(0.01, 0.1, size=numpoints)\n price = 100.0 * cumprod(returns)\n volume = abs(random.normal(1000.0, 1500.0, size=numpoints) + 2000.0)\n\n time_ds = ArrayDataSource(index)\n vol_ds = ArrayDataSource(volume, sort_order=\"none\")\n price_ds = ArrayDataSource(price, sort_order=\"none\")\n\n # Create the price plots\n price_plot, mini_plot = self._create_price_plots(time_ds, price_ds)\n price_plot.index_mapper.domain_limits = (index[0], index[-1])\n self.price_plot = price_plot\n self.mini_plot = mini_plot\n\n # Create the volume plot\n vol_plot = self._create_vol_plot(time_ds, vol_ds)\n vol_plot.index_mapper.domain_limits = (index[0], index[-1])\n\n # Set the plot's bottom axis to use the Scales ticking system\n ticker = ScalesTickGenerator(scale=CalendarScaleSystem())\n for plot in price_plot, mini_plot, vol_plot:\n bottom_axis = PlotAxis(\n plot, orientation=\"bottom\", tick_generator=ticker\n )\n plot.overlays.append(bottom_axis)\n plot.overlays.append(PlotAxis(plot, orientation=\"left\"))\n hgrid, vgrid = add_default_grids(plot)\n vgrid.tick_generator = bottom_axis.tick_generator\n\n container = VPlotContainer(\n bgcolor=\"lightgray\", spacing=40, padding=50, fill_padding=False\n )\n container.add(mini_plot, vol_plot, price_plot)\n\n return container\n\n\nif __name__ == \"__main__\":\n # Save demo so that it doesn't get garbage collected when run within\n # existing event loop (i.e. from ipython).\n demo = demo_main(\n PlotFrame, size=(800, 600), title=\"Stock price and volume\"\n )\n", "path": "examples/demo/financial/stock_prices.py"}]}
| 3,207 | 126 |
gh_patches_debug_4443
|
rasdani/github-patches
|
git_diff
|
pytorch__text-145
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
AttributeError: 'list' object has no attribute 'rstrip'
Hi all, torchtext previously worked for me when I was running Anaconda Python. However, now that I have uninstalled Anaconda Python, it has stopped working.
It gives me the following error:
```
File "/Library/Python/2.7/site-packages/torchtext/data/example.py", line 59, in fromlist
setattr(ex, name, field.preprocess(val.rstrip('\n')))
AttributeError: 'list' object has no attribute 'rstrip'
```
Thanks!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `torchtext/data/example.py`
Content:
```
1 import csv
2 import json
3
4 import six
5
6
7 class Example(object):
8 """Defines a single training or test example.
9
10 Stores each column of the example as an attribute.
11 """
12
13 @classmethod
14 def fromJSON(cls, data, fields):
15 return cls.fromdict(json.loads(data), fields)
16
17 @classmethod
18 def fromdict(cls, data, fields):
19 ex = cls()
20 for key, vals in fields.items():
21 if key not in data:
22 raise ValueError("Specified key {} was not found in "
23 "the input data".format(key))
24 if vals is not None:
25 if not isinstance(vals, list):
26 vals = [vals]
27 for val in vals:
28 name, field = val
29 setattr(ex, name, field.preprocess(data[key]))
30 return ex
31
32 @classmethod
33 def fromTSV(cls, data, fields):
34 return cls.fromlist(data.split('\t'), fields)
35
36 @classmethod
37 def fromCSV(cls, data, fields):
38 data = data.rstrip("\n")
39 # If Python 2, encode to utf-8 since CSV doesn't take unicode input
40 if six.PY2:
41 data = data.encode('utf-8')
42 # Use Python CSV module to parse the CSV line
43 parsed_csv_lines = csv.reader([data])
44
45 # If Python 2, decode back to unicode (the original input format).
46 if six.PY2:
47 for line in parsed_csv_lines:
48 parsed_csv_line = [six.text_type(col, 'utf-8') for col in line]
49 break
50 else:
51 parsed_csv_line = list(parsed_csv_lines)[0]
52 return cls.fromlist(parsed_csv_line, fields)
53
54 @classmethod
55 def fromlist(cls, data, fields):
56 ex = cls()
57 for (name, field), val in zip(fields, data):
58 if field is not None:
59 setattr(ex, name, field.preprocess(val.rstrip('\n')))
60 return ex
61
62 @classmethod
63 def fromtree(cls, data, fields, subtrees=False):
64 try:
65 from nltk.tree import Tree
66 except ImportError:
67 print("Please install NLTK. "
68 "See the docs at http://nltk.org for more information.")
69 raise
70 tree = Tree.fromstring(data)
71 if subtrees:
72 return [cls.fromlist(
73 [' '.join(t.leaves()), t.label()], fields) for t in tree.subtrees()]
74 return cls.fromlist([' '.join(tree.leaves()), tree.label()], fields)
75
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/torchtext/data/example.py b/torchtext/data/example.py
--- a/torchtext/data/example.py
+++ b/torchtext/data/example.py
@@ -56,7 +56,9 @@
ex = cls()
for (name, field), val in zip(fields, data):
if field is not None:
- setattr(ex, name, field.preprocess(val.rstrip('\n')))
+ if isinstance(val, six.string_types):
+ val = val.rstrip('\n')
+ setattr(ex, name, field.preprocess(val))
return ex
@classmethod
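Editor's note on the patch above: `fromlist` may receive either a raw string or an already-tokenized list for a column, so the newline stripping has to be guarded by a string-type check. A small self-contained sketch of that guard follows, assuming `six` is available (the file above already imports it); the helper name is made up for illustration.

```python
import six


def strip_trailing_newline(val):
    # Only plain strings carry a trailing newline worth stripping; lists of
    # tokens (or any other value) are passed through unchanged.
    if isinstance(val, six.string_types):
        return val.rstrip("\n")
    return val


assert strip_trailing_newline("positive\n") == "positive"
assert strip_trailing_newline(["already", "tokenized"]) == ["already", "tokenized"]
```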
|
{"golden_diff": "diff --git a/torchtext/data/example.py b/torchtext/data/example.py\n--- a/torchtext/data/example.py\n+++ b/torchtext/data/example.py\n@@ -56,7 +56,9 @@\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n- setattr(ex, name, field.preprocess(val.rstrip('\\n')))\n+ if isinstance(val, six.string_types):\n+ val = val.rstrip('\\n')\n+ setattr(ex, name, field.preprocess(val))\n return ex\n \n @classmethod\n", "issue": "AttributeError: 'list' object has no attribute 'rstrip'\nHi all, previously torchtext works for me when I'm running anaconda python. However, now, when i uninstalled my anaconda python. It stops working.\r\n\r\nIt gives me the following error: \r\n\r\n```\r\nFile \"/Library/Python/2.7/site-packages/torchtext/data/example.py\", line 59, in fromlist\r\n setattr(ex, name, field.preprocess(val.rstrip('\\n')))\r\nAttributeError: 'list' object has no attribute 'rstrip'\r\n\r\n```\r\n\r\nThanks!\n", "before_files": [{"content": "import csv\nimport json\n\nimport six\n\n\nclass Example(object):\n \"\"\"Defines a single training or test example.\n\n Stores each column of the example as an attribute.\n \"\"\"\n\n @classmethod\n def fromJSON(cls, data, fields):\n return cls.fromdict(json.loads(data), fields)\n\n @classmethod\n def fromdict(cls, data, fields):\n ex = cls()\n for key, vals in fields.items():\n if key not in data:\n raise ValueError(\"Specified key {} was not found in \"\n \"the input data\".format(key))\n if vals is not None:\n if not isinstance(vals, list):\n vals = [vals]\n for val in vals:\n name, field = val\n setattr(ex, name, field.preprocess(data[key]))\n return ex\n\n @classmethod\n def fromTSV(cls, data, fields):\n return cls.fromlist(data.split('\\t'), fields)\n\n @classmethod\n def fromCSV(cls, data, fields):\n data = data.rstrip(\"\\n\")\n # If Python 2, encode to utf-8 since CSV doesn't take unicode input\n if six.PY2:\n data = data.encode('utf-8')\n # Use Python CSV module to parse the CSV line\n parsed_csv_lines = csv.reader([data])\n\n # If Python 2, decode back to unicode (the original input format).\n if six.PY2:\n for line in parsed_csv_lines:\n parsed_csv_line = [six.text_type(col, 'utf-8') for col in line]\n break\n else:\n parsed_csv_line = list(parsed_csv_lines)[0]\n return cls.fromlist(parsed_csv_line, fields)\n\n @classmethod\n def fromlist(cls, data, fields):\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n setattr(ex, name, field.preprocess(val.rstrip('\\n')))\n return ex\n\n @classmethod\n def fromtree(cls, data, fields, subtrees=False):\n try:\n from nltk.tree import Tree\n except ImportError:\n print(\"Please install NLTK. 
\"\n \"See the docs at http://nltk.org for more information.\")\n raise\n tree = Tree.fromstring(data)\n if subtrees:\n return [cls.fromlist(\n [' '.join(t.leaves()), t.label()], fields) for t in tree.subtrees()]\n return cls.fromlist([' '.join(tree.leaves()), tree.label()], fields)\n", "path": "torchtext/data/example.py"}], "after_files": [{"content": "import csv\nimport json\n\nimport six\n\n\nclass Example(object):\n \"\"\"Defines a single training or test example.\n\n Stores each column of the example as an attribute.\n \"\"\"\n\n @classmethod\n def fromJSON(cls, data, fields):\n return cls.fromdict(json.loads(data), fields)\n\n @classmethod\n def fromdict(cls, data, fields):\n ex = cls()\n for key, vals in fields.items():\n if key not in data:\n raise ValueError(\"Specified key {} was not found in \"\n \"the input data\".format(key))\n if vals is not None:\n if not isinstance(vals, list):\n vals = [vals]\n for val in vals:\n name, field = val\n setattr(ex, name, field.preprocess(data[key]))\n return ex\n\n @classmethod\n def fromTSV(cls, data, fields):\n return cls.fromlist(data.split('\\t'), fields)\n\n @classmethod\n def fromCSV(cls, data, fields):\n data = data.rstrip(\"\\n\")\n # If Python 2, encode to utf-8 since CSV doesn't take unicode input\n if six.PY2:\n data = data.encode('utf-8')\n # Use Python CSV module to parse the CSV line\n parsed_csv_lines = csv.reader([data])\n\n # If Python 2, decode back to unicode (the original input format).\n if six.PY2:\n for line in parsed_csv_lines:\n parsed_csv_line = [six.text_type(col, 'utf-8') for col in line]\n break\n else:\n parsed_csv_line = list(parsed_csv_lines)[0]\n return cls.fromlist(parsed_csv_line, fields)\n\n @classmethod\n def fromlist(cls, data, fields):\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n if isinstance(val, six.string_types):\n val = val.rstrip('\\n')\n setattr(ex, name, field.preprocess(val))\n return ex\n\n @classmethod\n def fromtree(cls, data, fields, subtrees=False):\n try:\n from nltk.tree import Tree\n except ImportError:\n print(\"Please install NLTK. \"\n \"See the docs at http://nltk.org for more information.\")\n raise\n tree = Tree.fromstring(data)\n if subtrees:\n return [cls.fromlist(\n [' '.join(t.leaves()), t.label()], fields) for t in tree.subtrees()]\n return cls.fromlist([' '.join(tree.leaves()), tree.label()], fields)\n", "path": "torchtext/data/example.py"}]}
| 1,059 | 128 |
gh_patches_debug_5186
|
rasdani/github-patches
|
git_diff
|
talonhub__community-861
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
formatter accuracy
This has come up a couple of times. I suspect you'll improve formatter accuracy if you make these changes:
"allcaps" -> "all cap"
"alldown" -> "all down"
The reason for this is that, for example, "alldown" is not an English word; D sounds like T, so "alld" sounds like "alt", and you get "alt down".
The same goes for "allcaps": "allc" sounds a bit like "alt", and you get "alt cap".
"allcaps" has a second problem: if you say "allcaps something", the S-S chain (cap(s s)omething) blends together in speech and can come out sounding more like "alt cap something". Hence removing the S.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `code/formatters.py`
Content:
```
1 from talon import Module, Context, actions, ui, imgui, app
2 from talon.grammar import Phrase
3 from typing import List, Union
4 import logging
5 import re
6
7 ctx = Context()
8 key = actions.key
9 edit = actions.edit
10
11 words_to_keep_lowercase = "a an the at by for in is of on to up and as but or nor".split()
12
13 # The last phrase spoken, without & with formatting. Used for reformatting.
14 last_phrase = ""
15 last_phrase_formatted = ""
16
17 # Internally, a formatter is a pair (sep, fn).
18 #
19 # - sep: a boolean, true iff the formatter should leave spaces between words.
20 # We use SEP & NOSEP for this for clarity.
21 #
22 # - fn: a function (i, word, is_end) --> formatted_word, called on each `word`.
23 # `i` is the word's index in the list, and `is_end` is True iff it's the
24 # last word in the list.
25 SEP = True
26 NOSEP = False
27
28
29 def format_phrase(m: Union[str, Phrase], formatters: str):
30 global last_phrase, last_phrase_formatted
31 last_phrase = m
32 words = []
33 if isinstance(m, str):
34 words = m.split(" ")
35 else:
36 # FIXME: I believe this is no longer necessary. -rntz, 2022-02-10
37 if m.words[-1] == "over":
38 m.words = m.words[:-1]
39 words = actions.dictate.replace_words(actions.dictate.parse_words(m))
40
41 result = last_phrase_formatted = format_phrase_without_adding_to_history(words, formatters)
42 actions.user.add_phrase_to_history(result)
43 # Arguably, we shouldn't be dealing with history here, but somewhere later
44 # down the line. But we have a bunch of code that relies on doing it this
45 # way and I don't feel like rewriting it just now. -rntz, 2020-11-04
46 return result
47
48
49 def format_phrase_without_adding_to_history(word_list, formatters: str):
50 # A formatter is a pair (keep_spaces, function). We drop spaces if any
51 # formatter does; we apply their functions in reverse order.
52 formatters = [all_formatters[name] for name in formatters.split(',')]
53 separator = ' ' if all(x[0] for x in formatters) else ''
54 functions = [x[1] for x in reversed(formatters)]
55 words = []
56 for i, word in enumerate(word_list):
57 for f in functions:
58 word = f(i, word, i == len(word_list) - 1)
59 words.append(word)
60 return separator.join(words)
61
62
63 # Formatter helpers
64 def surround(by):
65 return lambda i, word, last: (by if i == 0 else '') + word + (by if last else '')
66
67
68 def words_with_joiner(joiner):
69 """Pass through words unchanged, but add a separator between them."""
70 return (NOSEP, lambda i, word, _: ('' if i == 0 else joiner) + word)
71
72
73 def first_vs_rest(first_func, rest_func=lambda w: w):
74 """Supply one or two transformer functions for the first and rest of
75 words respectively.
76
77 Leave second argument out if you want all but the first word to be passed
78 through unchanged.
79 Set first argument to None if you want the first word to be passed
80 through unchanged.
81 """
82 first_func = first_func or (lambda w: w)
83 return lambda i, word, _: first_func(word) if i == 0 else rest_func(word)
84
85
86 def every_word(word_func):
87 """Apply one function to every word."""
88 return lambda i, word, _: word_func(word)
89
90
91 formatters_dict = {
92 "NOOP": (SEP, lambda i, word, _: word),
93 "DOUBLE_UNDERSCORE": (NOSEP, first_vs_rest(lambda w: "__%s__" % w)),
94 "PRIVATE_CAMEL_CASE": (NOSEP, first_vs_rest(lambda w: w.lower(), lambda w: w.capitalize())),
95 "PROTECTED_CAMEL_CASE": (NOSEP, first_vs_rest(lambda w: w.lower(), lambda w: w.capitalize())),
96 "PUBLIC_CAMEL_CASE": (NOSEP, every_word(lambda w: w.capitalize())),
97 "SNAKE_CASE": (
98 NOSEP,
99 first_vs_rest(lambda w: w.lower(), lambda w: "_" + w.lower()),
100 ),
101 "NO_SPACES": (NOSEP, every_word(lambda w: w)),
102 "DASH_SEPARATED": words_with_joiner("-"),
103 "TERMINAL_DASH_SEPARATED": (
104 NOSEP,
105 first_vs_rest(lambda w: " --" + w.lower(), lambda w: "-" + w.lower()),
106 ),
107 "DOUBLE_COLON_SEPARATED": words_with_joiner("::"),
108 "ALL_CAPS": (SEP, every_word(lambda w: w.upper())),
109 "ALL_LOWERCASE": (SEP, every_word(lambda w: w.lower())),
110 "DOUBLE_QUOTED_STRING": (SEP, surround('"')),
111 "SINGLE_QUOTED_STRING": (SEP, surround("'")),
112 "SPACE_SURROUNDED_STRING": (SEP, surround(" ")),
113 "DOT_SEPARATED": words_with_joiner("."),
114 "DOT_SNAKE": (NOSEP, lambda i, word, _: "." + word if i == 0 else "_" + word),
115 "SLASH_SEPARATED": (NOSEP, every_word(lambda w: "/" + w)),
116 "CAPITALIZE_FIRST_WORD": (SEP, first_vs_rest(lambda w: w.capitalize())),
117 "CAPITALIZE_ALL_WORDS": (
118 SEP,
119 lambda i, word, _: word.capitalize()
120 if i == 0 or word not in words_to_keep_lowercase
121 else word,
122 ),
123 }
124
125 # This is the mapping from spoken phrases to formatters
126 formatters_words = {
127 "all caps": formatters_dict["ALL_CAPS"],
128 "all down": formatters_dict["ALL_LOWERCASE"],
129 "camel": formatters_dict["PRIVATE_CAMEL_CASE"],
130 "dotted": formatters_dict["DOT_SEPARATED"],
131 "dub string": formatters_dict["DOUBLE_QUOTED_STRING"],
132 "dunder": formatters_dict["DOUBLE_UNDERSCORE"],
133 "hammer": formatters_dict["PUBLIC_CAMEL_CASE"],
134 "kebab": formatters_dict["DASH_SEPARATED"],
135 "packed": formatters_dict["DOUBLE_COLON_SEPARATED"],
136 "padded": formatters_dict["SPACE_SURROUNDED_STRING"],
137 "slasher": formatters_dict["SLASH_SEPARATED"],
138 "smash": formatters_dict["NO_SPACES"],
139 "snake": formatters_dict["SNAKE_CASE"],
140 "string": formatters_dict["SINGLE_QUOTED_STRING"],
141 "title": formatters_dict["CAPITALIZE_ALL_WORDS"],
142 }
143
144 all_formatters = {}
145 all_formatters.update(formatters_dict)
146 all_formatters.update(formatters_words)
147
148 mod = Module()
149 mod.list("formatters", desc="list of formatters")
150 mod.list(
151 "prose_formatter",
152 desc="words to start dictating prose, and the formatter they apply",
153 )
154
155
156 @mod.capture(rule="{self.formatters}+")
157 def formatters(m) -> str:
158 "Returns a comma-separated string of formatters e.g. 'SNAKE,DUBSTRING'"
159 return ",".join(m.formatters_list)
160
161
162 @mod.capture(
163 # Note that if the user speaks something like "snake dot", it will
164 # insert "dot" - otherwise, they wouldn't be able to insert punctuation
165 # words directly.
166 rule="<self.formatters> <user.text> (<user.text> | <user.formatter_immune>)*"
167 )
168 def format_text(m) -> str:
169 "Formats the text and returns a string"
170 out = ""
171 formatters = m[0]
172 for chunk in m[1:]:
173 if isinstance(chunk, ImmuneString):
174 out += chunk.string
175 else:
176 out += format_phrase(chunk, formatters)
177 return out
178
179
180 class ImmuneString(object):
181 """Wrapper that makes a string immune from formatting."""
182
183 def __init__(self, string):
184 self.string = string
185
186
187 @mod.capture(
188 # Add anything else into this that you want to be able to speak during a
189 # formatter.
190 rule="(<user.symbol_key> | numb <number>)"
191 )
192 def formatter_immune(m) -> ImmuneString:
193 """Text that can be interspersed into a formatter, e.g. characters.
194
195 It will be inserted directly, without being formatted.
196
197 """
198 if hasattr(m, "number"):
199 value = m.number
200 else:
201 value = m[0]
202 return ImmuneString(str(value))
203
204
205 @mod.action_class
206 class Actions:
207 def formatted_text(phrase: Union[str, Phrase], formatters: str) -> str:
208 """Formats a phrase according to formatters. formatters is a comma-separated string of formatters (e.g. 'CAPITALIZE_ALL_WORDS,DOUBLE_QUOTED_STRING')"""
209 return format_phrase(phrase, formatters)
210
211 def insert_formatted(phrase: Union[str, Phrase], formatters: str):
212 """Inserts a phrase formatted according to formatters. Formatters is a comma separated list of formatters (e.g. 'CAPITALIZE_ALL_WORDS,DOUBLE_QUOTED_STRING')"""
213 actions.insert(format_phrase(phrase, formatters))
214
215 def insert_with_history(text: str):
216 """Inserts some text, remembering it in the phrase history."""
217 actions.user.add_phrase_to_history(text)
218 actions.insert(text)
219
220 def formatters_reformat_last(formatters: str) -> str:
221 """Clears and reformats last formatted phrase"""
222 global last_phrase, last_phrase_formatted
223 if actions.user.get_last_phrase() != last_phrase_formatted:
224 # The last thing we inserted isn't the same as the last thing we
225 # formatted, so abort.
226 logging.warning(
227 "formatters_reformat_last(): Last phrase wasn't a formatter!"
228 )
229 return
230 actions.user.clear_last_phrase()
231 actions.user.insert_formatted(last_phrase, formatters)
232
233 def formatters_reformat_selection(formatters: str) -> str:
234 """Reformats the current selection."""
235 selected = edit.selected_text()
236 if not selected:
237 print("Asked to reformat selection, but nothing selected!")
238 return
239 unformatted = unformat_text(selected)
240 # Delete separately for compatibility with programs that don't overwrite
241 # selected text (e.g. Emacs)
242 edit.delete()
243 text = actions.self.formatted_text(unformatted, formatters)
244 actions.insert(text)
245 return text
246
247 def get_formatters_words():
248 """returns a list of words currently used as formatters, and a demonstration string using those formatters"""
249 formatters_help_demo = {}
250 for name in sorted(set(formatters_words.keys())):
251 formatters_help_demo[name] = format_phrase_without_adding_to_history(['one', 'two', 'three'], name)
252 return formatters_help_demo
253
254 def reformat_text(text: str, formatters: str) -> str:
255 """Reformat the text."""
256 unformatted = unformat_text(text)
257 return actions.user.formatted_text(unformatted, formatters)
258
259 def insert_many(strings: List[str]) -> None:
260 """Insert a list of strings, sequentially."""
261 for string in strings:
262 actions.insert(string)
263
264 def unformat_text(text: str) -> str:
265 """Remove format from text"""
266 unformatted = re.sub(r"[^\w]+", " ", text)
267 # Split on camelCase, including numbers
268 # FIXME: handle non-ASCII letters!
269 unformatted = re.sub(r"(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|(?<=[a-zA-Z])(?=[0-9])|(?<=[0-9])(?=[a-zA-Z])", " ", unformatted)
270 # TODO: Separate out studleycase vars
271 return unformatted.lower()
272
273
274 ctx.lists["self.formatters"] = formatters_words.keys()
275 ctx.lists["self.prose_formatter"] = {
276 "say": "NOOP",
277 "speak": "NOOP",
278 "sentence": "CAPITALIZE_FIRST_WORD",
279 }
280
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/code/formatters.py b/code/formatters.py
--- a/code/formatters.py
+++ b/code/formatters.py
@@ -124,7 +124,7 @@
# This is the mapping from spoken phrases to formatters
formatters_words = {
- "all caps": formatters_dict["ALL_CAPS"],
+ "all cap": formatters_dict["ALL_CAPS"],
"all down": formatters_dict["ALL_LOWERCASE"],
"camel": formatters_dict["PRIVATE_CAMEL_CASE"],
"dotted": formatters_dict["DOT_SEPARATED"],
|
{"golden_diff": "diff --git a/code/formatters.py b/code/formatters.py\n--- a/code/formatters.py\n+++ b/code/formatters.py\n@@ -124,7 +124,7 @@\n \n # This is the mapping from spoken phrases to formatters\n formatters_words = {\n- \"all caps\": formatters_dict[\"ALL_CAPS\"],\n+ \"all cap\": formatters_dict[\"ALL_CAPS\"],\n \"all down\": formatters_dict[\"ALL_LOWERCASE\"],\n \"camel\": formatters_dict[\"PRIVATE_CAMEL_CASE\"],\n \"dotted\": formatters_dict[\"DOT_SEPARATED\"],\n", "issue": "formatter accuracy\nThis has come up a couple of times. I suspect you'll improve formatter accuracy if you make these changes:\r\n\r\n\"allcaps\" -> \"all cap\"\r\n\"alldown\" -> \"all down\"\r\n\r\nThe reason for this is e.g. \"alldown\" is not an english word, D sounds like T, so \"alld\" sounds like alt, and you get \"alt down\".\r\nSame for allcaps. Allc sounds a bit like alt, and you get \"alt cap\".\r\n\r\nallcaps has a second problem. If you say \"allcaps something\", the S S (cap(s s)omething) chain will blend together in your speech and you'll get something that might sound more like \"alt cap something\". Hence removing the S.\n", "before_files": [{"content": "from talon import Module, Context, actions, ui, imgui, app\nfrom talon.grammar import Phrase\nfrom typing import List, Union\nimport logging\nimport re\n\nctx = Context()\nkey = actions.key\nedit = actions.edit\n\nwords_to_keep_lowercase = \"a an the at by for in is of on to up and as but or nor\".split()\n\n# The last phrase spoken, without & with formatting. Used for reformatting.\nlast_phrase = \"\"\nlast_phrase_formatted = \"\"\n\n# Internally, a formatter is a pair (sep, fn).\n#\n# - sep: a boolean, true iff the formatter should leave spaces between words.\n# We use SEP & NOSEP for this for clarity.\n#\n# - fn: a function (i, word, is_end) --> formatted_word, called on each `word`.\n# `i` is the word's index in the list, and `is_end` is True iff it's the\n# last word in the list.\nSEP = True\nNOSEP = False\n\n\ndef format_phrase(m: Union[str, Phrase], formatters: str):\n global last_phrase, last_phrase_formatted\n last_phrase = m\n words = []\n if isinstance(m, str):\n words = m.split(\" \")\n else:\n # FIXME: I believe this is no longer necessary. -rntz, 2022-02-10\n if m.words[-1] == \"over\":\n m.words = m.words[:-1]\n words = actions.dictate.replace_words(actions.dictate.parse_words(m))\n\n result = last_phrase_formatted = format_phrase_without_adding_to_history(words, formatters)\n actions.user.add_phrase_to_history(result)\n # Arguably, we shouldn't be dealing with history here, but somewhere later\n # down the line. But we have a bunch of code that relies on doing it this\n # way and I don't feel like rewriting it just now. -rntz, 2020-11-04\n return result\n\n\ndef format_phrase_without_adding_to_history(word_list, formatters: str):\n # A formatter is a pair (keep_spaces, function). 
We drop spaces if any\n # formatter does; we apply their functions in reverse order.\n formatters = [all_formatters[name] for name in formatters.split(',')]\n separator = ' ' if all(x[0] for x in formatters) else ''\n functions = [x[1] for x in reversed(formatters)]\n words = []\n for i, word in enumerate(word_list):\n for f in functions:\n word = f(i, word, i == len(word_list) - 1)\n words.append(word)\n return separator.join(words)\n\n\n# Formatter helpers\ndef surround(by):\n return lambda i, word, last: (by if i == 0 else '') + word + (by if last else '')\n\n\ndef words_with_joiner(joiner):\n \"\"\"Pass through words unchanged, but add a separator between them.\"\"\"\n return (NOSEP, lambda i, word, _: ('' if i == 0 else joiner) + word)\n\n\ndef first_vs_rest(first_func, rest_func=lambda w: w):\n \"\"\"Supply one or two transformer functions for the first and rest of\n words respectively.\n\n Leave second argument out if you want all but the first word to be passed\n through unchanged.\n Set first argument to None if you want the first word to be passed\n through unchanged.\n \"\"\"\n first_func = first_func or (lambda w: w)\n return lambda i, word, _: first_func(word) if i == 0 else rest_func(word)\n\n\ndef every_word(word_func):\n \"\"\"Apply one function to every word.\"\"\"\n return lambda i, word, _: word_func(word)\n\n\nformatters_dict = {\n \"NOOP\": (SEP, lambda i, word, _: word),\n \"DOUBLE_UNDERSCORE\": (NOSEP, first_vs_rest(lambda w: \"__%s__\" % w)),\n \"PRIVATE_CAMEL_CASE\": (NOSEP, first_vs_rest(lambda w: w.lower(), lambda w: w.capitalize())),\n \"PROTECTED_CAMEL_CASE\": (NOSEP, first_vs_rest(lambda w: w.lower(), lambda w: w.capitalize())),\n \"PUBLIC_CAMEL_CASE\": (NOSEP, every_word(lambda w: w.capitalize())),\n \"SNAKE_CASE\": (\n NOSEP,\n first_vs_rest(lambda w: w.lower(), lambda w: \"_\" + w.lower()),\n ),\n \"NO_SPACES\": (NOSEP, every_word(lambda w: w)),\n \"DASH_SEPARATED\": words_with_joiner(\"-\"),\n \"TERMINAL_DASH_SEPARATED\": (\n NOSEP,\n first_vs_rest(lambda w: \" --\" + w.lower(), lambda w: \"-\" + w.lower()),\n ),\n \"DOUBLE_COLON_SEPARATED\": words_with_joiner(\"::\"),\n \"ALL_CAPS\": (SEP, every_word(lambda w: w.upper())),\n \"ALL_LOWERCASE\": (SEP, every_word(lambda w: w.lower())),\n \"DOUBLE_QUOTED_STRING\": (SEP, surround('\"')),\n \"SINGLE_QUOTED_STRING\": (SEP, surround(\"'\")),\n \"SPACE_SURROUNDED_STRING\": (SEP, surround(\" \")),\n \"DOT_SEPARATED\": words_with_joiner(\".\"),\n \"DOT_SNAKE\": (NOSEP, lambda i, word, _: \".\" + word if i == 0 else \"_\" + word),\n \"SLASH_SEPARATED\": (NOSEP, every_word(lambda w: \"/\" + w)),\n \"CAPITALIZE_FIRST_WORD\": (SEP, first_vs_rest(lambda w: w.capitalize())),\n \"CAPITALIZE_ALL_WORDS\": (\n SEP,\n lambda i, word, _: word.capitalize()\n if i == 0 or word not in words_to_keep_lowercase\n else word,\n ),\n}\n\n# This is the mapping from spoken phrases to formatters\nformatters_words = {\n \"all caps\": formatters_dict[\"ALL_CAPS\"],\n \"all down\": formatters_dict[\"ALL_LOWERCASE\"],\n \"camel\": formatters_dict[\"PRIVATE_CAMEL_CASE\"],\n \"dotted\": formatters_dict[\"DOT_SEPARATED\"],\n \"dub string\": formatters_dict[\"DOUBLE_QUOTED_STRING\"],\n \"dunder\": formatters_dict[\"DOUBLE_UNDERSCORE\"],\n \"hammer\": formatters_dict[\"PUBLIC_CAMEL_CASE\"],\n \"kebab\": formatters_dict[\"DASH_SEPARATED\"],\n \"packed\": formatters_dict[\"DOUBLE_COLON_SEPARATED\"],\n \"padded\": formatters_dict[\"SPACE_SURROUNDED_STRING\"],\n \"slasher\": formatters_dict[\"SLASH_SEPARATED\"],\n \"smash\": 
formatters_dict[\"NO_SPACES\"],\n \"snake\": formatters_dict[\"SNAKE_CASE\"],\n \"string\": formatters_dict[\"SINGLE_QUOTED_STRING\"],\n \"title\": formatters_dict[\"CAPITALIZE_ALL_WORDS\"],\n}\n\nall_formatters = {}\nall_formatters.update(formatters_dict)\nall_formatters.update(formatters_words)\n\nmod = Module()\nmod.list(\"formatters\", desc=\"list of formatters\")\nmod.list(\n \"prose_formatter\",\n desc=\"words to start dictating prose, and the formatter they apply\",\n)\n\n\[email protected](rule=\"{self.formatters}+\")\ndef formatters(m) -> str:\n \"Returns a comma-separated string of formatters e.g. 'SNAKE,DUBSTRING'\"\n return \",\".join(m.formatters_list)\n\n\[email protected](\n # Note that if the user speaks something like \"snake dot\", it will\n # insert \"dot\" - otherwise, they wouldn't be able to insert punctuation\n # words directly.\n rule=\"<self.formatters> <user.text> (<user.text> | <user.formatter_immune>)*\"\n)\ndef format_text(m) -> str:\n \"Formats the text and returns a string\"\n out = \"\"\n formatters = m[0]\n for chunk in m[1:]:\n if isinstance(chunk, ImmuneString):\n out += chunk.string\n else:\n out += format_phrase(chunk, formatters)\n return out\n\n\nclass ImmuneString(object):\n \"\"\"Wrapper that makes a string immune from formatting.\"\"\"\n\n def __init__(self, string):\n self.string = string\n\n\[email protected](\n # Add anything else into this that you want to be able to speak during a\n # formatter.\n rule=\"(<user.symbol_key> | numb <number>)\"\n)\ndef formatter_immune(m) -> ImmuneString:\n \"\"\"Text that can be interspersed into a formatter, e.g. characters.\n\n It will be inserted directly, without being formatted.\n\n \"\"\"\n if hasattr(m, \"number\"):\n value = m.number\n else:\n value = m[0]\n return ImmuneString(str(value))\n\n\[email protected]_class\nclass Actions:\n def formatted_text(phrase: Union[str, Phrase], formatters: str) -> str:\n \"\"\"Formats a phrase according to formatters. formatters is a comma-separated string of formatters (e.g. 'CAPITALIZE_ALL_WORDS,DOUBLE_QUOTED_STRING')\"\"\"\n return format_phrase(phrase, formatters)\n\n def insert_formatted(phrase: Union[str, Phrase], formatters: str):\n \"\"\"Inserts a phrase formatted according to formatters. Formatters is a comma separated list of formatters (e.g. 'CAPITALIZE_ALL_WORDS,DOUBLE_QUOTED_STRING')\"\"\"\n actions.insert(format_phrase(phrase, formatters))\n\n def insert_with_history(text: str):\n \"\"\"Inserts some text, remembering it in the phrase history.\"\"\"\n actions.user.add_phrase_to_history(text)\n actions.insert(text)\n\n def formatters_reformat_last(formatters: str) -> str:\n \"\"\"Clears and reformats last formatted phrase\"\"\"\n global last_phrase, last_phrase_formatted\n if actions.user.get_last_phrase() != last_phrase_formatted:\n # The last thing we inserted isn't the same as the last thing we\n # formatted, so abort.\n logging.warning(\n \"formatters_reformat_last(): Last phrase wasn't a formatter!\"\n )\n return\n actions.user.clear_last_phrase()\n actions.user.insert_formatted(last_phrase, formatters)\n\n def formatters_reformat_selection(formatters: str) -> str:\n \"\"\"Reformats the current selection.\"\"\"\n selected = edit.selected_text()\n if not selected:\n print(\"Asked to reformat selection, but nothing selected!\")\n return\n unformatted = unformat_text(selected)\n # Delete separately for compatibility with programs that don't overwrite\n # selected text (e.g. 
Emacs)\n edit.delete()\n text = actions.self.formatted_text(unformatted, formatters)\n actions.insert(text)\n return text\n\n def get_formatters_words():\n \"\"\"returns a list of words currently used as formatters, and a demonstration string using those formatters\"\"\"\n formatters_help_demo = {}\n for name in sorted(set(formatters_words.keys())):\n formatters_help_demo[name] = format_phrase_without_adding_to_history(['one', 'two', 'three'], name)\n return formatters_help_demo\n\n def reformat_text(text: str, formatters: str) -> str:\n \"\"\"Reformat the text.\"\"\"\n unformatted = unformat_text(text)\n return actions.user.formatted_text(unformatted, formatters)\n\n def insert_many(strings: List[str]) -> None:\n \"\"\"Insert a list of strings, sequentially.\"\"\"\n for string in strings:\n actions.insert(string)\n\ndef unformat_text(text: str) -> str:\n \"\"\"Remove format from text\"\"\"\n unformatted = re.sub(r\"[^\\w]+\", \" \", text)\n # Split on camelCase, including numbers\n # FIXME: handle non-ASCII letters!\n unformatted = re.sub(r\"(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|(?<=[a-zA-Z])(?=[0-9])|(?<=[0-9])(?=[a-zA-Z])\", \" \", unformatted)\n # TODO: Separate out studleycase vars\n return unformatted.lower()\n\n\nctx.lists[\"self.formatters\"] = formatters_words.keys()\nctx.lists[\"self.prose_formatter\"] = {\n \"say\": \"NOOP\",\n \"speak\": \"NOOP\",\n \"sentence\": \"CAPITALIZE_FIRST_WORD\",\n}\n", "path": "code/formatters.py"}], "after_files": [{"content": "from talon import Module, Context, actions, ui, imgui, app\nfrom talon.grammar import Phrase\nfrom typing import List, Union\nimport logging\nimport re\n\nctx = Context()\nkey = actions.key\nedit = actions.edit\n\nwords_to_keep_lowercase = \"a an the at by for in is of on to up and as but or nor\".split()\n\n# The last phrase spoken, without & with formatting. Used for reformatting.\nlast_phrase = \"\"\nlast_phrase_formatted = \"\"\n\n# Internally, a formatter is a pair (sep, fn).\n#\n# - sep: a boolean, true iff the formatter should leave spaces between words.\n# We use SEP & NOSEP for this for clarity.\n#\n# - fn: a function (i, word, is_end) --> formatted_word, called on each `word`.\n# `i` is the word's index in the list, and `is_end` is True iff it's the\n# last word in the list.\nSEP = True\nNOSEP = False\n\n\ndef format_phrase(m: Union[str, Phrase], formatters: str):\n global last_phrase, last_phrase_formatted\n last_phrase = m\n words = []\n if isinstance(m, str):\n words = m.split(\" \")\n else:\n # FIXME: I believe this is no longer necessary. -rntz, 2022-02-10\n if m.words[-1] == \"over\":\n m.words = m.words[:-1]\n words = actions.dictate.replace_words(actions.dictate.parse_words(m))\n\n result = last_phrase_formatted = format_phrase_without_adding_to_history(words, formatters)\n actions.user.add_phrase_to_history(result)\n # Arguably, we shouldn't be dealing with history here, but somewhere later\n # down the line. But we have a bunch of code that relies on doing it this\n # way and I don't feel like rewriting it just now. -rntz, 2020-11-04\n return result\n\n\ndef format_phrase_without_adding_to_history(word_list, formatters: str):\n # A formatter is a pair (keep_spaces, function). 
We drop spaces if any\n # formatter does; we apply their functions in reverse order.\n formatters = [all_formatters[name] for name in formatters.split(',')]\n separator = ' ' if all(x[0] for x in formatters) else ''\n functions = [x[1] for x in reversed(formatters)]\n words = []\n for i, word in enumerate(word_list):\n for f in functions:\n word = f(i, word, i == len(word_list) - 1)\n words.append(word)\n return separator.join(words)\n\n\n# Formatter helpers\ndef surround(by):\n return lambda i, word, last: (by if i == 0 else '') + word + (by if last else '')\n\n\ndef words_with_joiner(joiner):\n \"\"\"Pass through words unchanged, but add a separator between them.\"\"\"\n return (NOSEP, lambda i, word, _: ('' if i == 0 else joiner) + word)\n\n\ndef first_vs_rest(first_func, rest_func=lambda w: w):\n \"\"\"Supply one or two transformer functions for the first and rest of\n words respectively.\n\n Leave second argument out if you want all but the first word to be passed\n through unchanged.\n Set first argument to None if you want the first word to be passed\n through unchanged.\n \"\"\"\n first_func = first_func or (lambda w: w)\n return lambda i, word, _: first_func(word) if i == 0 else rest_func(word)\n\n\ndef every_word(word_func):\n \"\"\"Apply one function to every word.\"\"\"\n return lambda i, word, _: word_func(word)\n\n\nformatters_dict = {\n \"NOOP\": (SEP, lambda i, word, _: word),\n \"DOUBLE_UNDERSCORE\": (NOSEP, first_vs_rest(lambda w: \"__%s__\" % w)),\n \"PRIVATE_CAMEL_CASE\": (NOSEP, first_vs_rest(lambda w: w.lower(), lambda w: w.capitalize())),\n \"PROTECTED_CAMEL_CASE\": (NOSEP, first_vs_rest(lambda w: w.lower(), lambda w: w.capitalize())),\n \"PUBLIC_CAMEL_CASE\": (NOSEP, every_word(lambda w: w.capitalize())),\n \"SNAKE_CASE\": (\n NOSEP,\n first_vs_rest(lambda w: w.lower(), lambda w: \"_\" + w.lower()),\n ),\n \"NO_SPACES\": (NOSEP, every_word(lambda w: w)),\n \"DASH_SEPARATED\": words_with_joiner(\"-\"),\n \"TERMINAL_DASH_SEPARATED\": (\n NOSEP,\n first_vs_rest(lambda w: \" --\" + w.lower(), lambda w: \"-\" + w.lower()),\n ),\n \"DOUBLE_COLON_SEPARATED\": words_with_joiner(\"::\"),\n \"ALL_CAPS\": (SEP, every_word(lambda w: w.upper())),\n \"ALL_LOWERCASE\": (SEP, every_word(lambda w: w.lower())),\n \"DOUBLE_QUOTED_STRING\": (SEP, surround('\"')),\n \"SINGLE_QUOTED_STRING\": (SEP, surround(\"'\")),\n \"SPACE_SURROUNDED_STRING\": (SEP, surround(\" \")),\n \"DOT_SEPARATED\": words_with_joiner(\".\"),\n \"DOT_SNAKE\": (NOSEP, lambda i, word, _: \".\" + word if i == 0 else \"_\" + word),\n \"SLASH_SEPARATED\": (NOSEP, every_word(lambda w: \"/\" + w)),\n \"CAPITALIZE_FIRST_WORD\": (SEP, first_vs_rest(lambda w: w.capitalize())),\n \"CAPITALIZE_ALL_WORDS\": (\n SEP,\n lambda i, word, _: word.capitalize()\n if i == 0 or word not in words_to_keep_lowercase\n else word,\n ),\n}\n\n# This is the mapping from spoken phrases to formatters\nformatters_words = {\n \"all cap\": formatters_dict[\"ALL_CAPS\"],\n \"all down\": formatters_dict[\"ALL_LOWERCASE\"],\n \"camel\": formatters_dict[\"PRIVATE_CAMEL_CASE\"],\n \"dotted\": formatters_dict[\"DOT_SEPARATED\"],\n \"dub string\": formatters_dict[\"DOUBLE_QUOTED_STRING\"],\n \"dunder\": formatters_dict[\"DOUBLE_UNDERSCORE\"],\n \"hammer\": formatters_dict[\"PUBLIC_CAMEL_CASE\"],\n \"kebab\": formatters_dict[\"DASH_SEPARATED\"],\n \"packed\": formatters_dict[\"DOUBLE_COLON_SEPARATED\"],\n \"padded\": formatters_dict[\"SPACE_SURROUNDED_STRING\"],\n \"slasher\": formatters_dict[\"SLASH_SEPARATED\"],\n \"smash\": 
formatters_dict[\"NO_SPACES\"],\n \"snake\": formatters_dict[\"SNAKE_CASE\"],\n \"string\": formatters_dict[\"SINGLE_QUOTED_STRING\"],\n \"title\": formatters_dict[\"CAPITALIZE_ALL_WORDS\"],\n}\n\nall_formatters = {}\nall_formatters.update(formatters_dict)\nall_formatters.update(formatters_words)\n\nmod = Module()\nmod.list(\"formatters\", desc=\"list of formatters\")\nmod.list(\n \"prose_formatter\",\n desc=\"words to start dictating prose, and the formatter they apply\",\n)\n\n\[email protected](rule=\"{self.formatters}+\")\ndef formatters(m) -> str:\n \"Returns a comma-separated string of formatters e.g. 'SNAKE,DUBSTRING'\"\n return \",\".join(m.formatters_list)\n\n\[email protected](\n # Note that if the user speaks something like \"snake dot\", it will\n # insert \"dot\" - otherwise, they wouldn't be able to insert punctuation\n # words directly.\n rule=\"<self.formatters> <user.text> (<user.text> | <user.formatter_immune>)*\"\n)\ndef format_text(m) -> str:\n \"Formats the text and returns a string\"\n out = \"\"\n formatters = m[0]\n for chunk in m[1:]:\n if isinstance(chunk, ImmuneString):\n out += chunk.string\n else:\n out += format_phrase(chunk, formatters)\n return out\n\n\nclass ImmuneString(object):\n \"\"\"Wrapper that makes a string immune from formatting.\"\"\"\n\n def __init__(self, string):\n self.string = string\n\n\[email protected](\n # Add anything else into this that you want to be able to speak during a\n # formatter.\n rule=\"(<user.symbol_key> | numb <number>)\"\n)\ndef formatter_immune(m) -> ImmuneString:\n \"\"\"Text that can be interspersed into a formatter, e.g. characters.\n\n It will be inserted directly, without being formatted.\n\n \"\"\"\n if hasattr(m, \"number\"):\n value = m.number\n else:\n value = m[0]\n return ImmuneString(str(value))\n\n\[email protected]_class\nclass Actions:\n def formatted_text(phrase: Union[str, Phrase], formatters: str) -> str:\n \"\"\"Formats a phrase according to formatters. formatters is a comma-separated string of formatters (e.g. 'CAPITALIZE_ALL_WORDS,DOUBLE_QUOTED_STRING')\"\"\"\n return format_phrase(phrase, formatters)\n\n def insert_formatted(phrase: Union[str, Phrase], formatters: str):\n \"\"\"Inserts a phrase formatted according to formatters. Formatters is a comma separated list of formatters (e.g. 'CAPITALIZE_ALL_WORDS,DOUBLE_QUOTED_STRING')\"\"\"\n actions.insert(format_phrase(phrase, formatters))\n\n def insert_with_history(text: str):\n \"\"\"Inserts some text, remembering it in the phrase history.\"\"\"\n actions.user.add_phrase_to_history(text)\n actions.insert(text)\n\n def formatters_reformat_last(formatters: str) -> str:\n \"\"\"Clears and reformats last formatted phrase\"\"\"\n global last_phrase, last_phrase_formatted\n if actions.user.get_last_phrase() != last_phrase_formatted:\n # The last thing we inserted isn't the same as the last thing we\n # formatted, so abort.\n logging.warning(\n \"formatters_reformat_last(): Last phrase wasn't a formatter!\"\n )\n return\n actions.user.clear_last_phrase()\n actions.user.insert_formatted(last_phrase, formatters)\n\n def formatters_reformat_selection(formatters: str) -> str:\n \"\"\"Reformats the current selection.\"\"\"\n selected = edit.selected_text()\n if not selected:\n print(\"Asked to reformat selection, but nothing selected!\")\n return\n unformatted = unformat_text(selected)\n # Delete separately for compatibility with programs that don't overwrite\n # selected text (e.g. 
Emacs)\n edit.delete()\n text = actions.self.formatted_text(unformatted, formatters)\n actions.insert(text)\n return text\n\n def get_formatters_words():\n \"\"\"returns a list of words currently used as formatters, and a demonstration string using those formatters\"\"\"\n formatters_help_demo = {}\n for name in sorted(set(formatters_words.keys())):\n formatters_help_demo[name] = format_phrase_without_adding_to_history(['one', 'two', 'three'], name)\n return formatters_help_demo\n\n def reformat_text(text: str, formatters: str) -> str:\n \"\"\"Reformat the text.\"\"\"\n unformatted = unformat_text(text)\n return actions.user.formatted_text(unformatted, formatters)\n\n def insert_many(strings: List[str]) -> None:\n \"\"\"Insert a list of strings, sequentially.\"\"\"\n for string in strings:\n actions.insert(string)\n\ndef unformat_text(text: str) -> str:\n \"\"\"Remove format from text\"\"\"\n unformatted = re.sub(r\"[^\\w]+\", \" \", text)\n # Split on camelCase, including numbers\n # FIXME: handle non-ASCII letters!\n unformatted = re.sub(r\"(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|(?<=[a-zA-Z])(?=[0-9])|(?<=[0-9])(?=[a-zA-Z])\", \" \", unformatted)\n # TODO: Separate out studleycase vars\n return unformatted.lower()\n\n\nctx.lists[\"self.formatters\"] = formatters_words.keys()\nctx.lists[\"self.prose_formatter\"] = {\n \"say\": \"NOOP\",\n \"speak\": \"NOOP\",\n \"sentence\": \"CAPITALIZE_FIRST_WORD\",\n}\n", "path": "code/formatters.py"}]}
| 3,833 | 130 |
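For illustration, below is a minimal, self-contained sketch of the formatter-composition logic from `code/formatters.py` above (the `SEP`/`NOSEP` pairs and the reversed function application), with no Talon dependency. The two formatters included and the sample phrase are assumptions chosen for the example; only the composition rules mirror the original module, and the spoken-phrase key reflects the fix ("all cap" rather than "allcaps").

```python
# Minimal sketch of the formatter-composition logic in code/formatters.py
# (no Talon required). Only the composition rules mirror the original;
# the two formatters and the sample phrase are illustrative.
SEP, NOSEP = True, False

def every_word(fn):
    # Apply one transformation to every word, ignoring position.
    return lambda i, word, last: fn(word)

def surround(by):
    # Prefix the first word and suffix the last word with `by`.
    return lambda i, word, last: (by if i == 0 else "") + word + (by if last else "")

formatters_dict = {
    "ALL_CAPS": (SEP, every_word(str.upper)),
    "DOUBLE_QUOTED_STRING": (SEP, surround('"')),
}

# Spoken-phrase mapping after the fix: "all cap" (not "allcaps") -> ALL_CAPS.
formatters_words = {"all cap": formatters_dict["ALL_CAPS"]}

def format_phrase(words, names):
    # Spaces are kept only if every selected formatter keeps them; the
    # per-word functions are applied in reverse order, as in the original.
    fmts = [formatters_dict[n] for n in names.split(",")]
    sep = " " if all(keep_spaces for keep_spaces, _ in fmts) else ""
    funcs = [fn for _, fn in reversed(fmts)]
    out = []
    for i, word in enumerate(words):
        for fn in funcs:
            word = fn(i, word, i == len(words) - 1)
        out.append(word)
    return sep.join(out)

print(format_phrase(["one", "two", "three"], "ALL_CAPS,DOUBLE_QUOTED_STRING"))
# -> "ONE TWO THREE"   (quotes included, words upper-cased, spaces kept)
```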
gh_patches_debug_12513
|
rasdani/github-patches
|
git_diff
|
pypa__pip-11417
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Completion in ZSH doesn't understand that arguments follow certain options
* Pip version: 9.0.1
* Python version: 2.7.12
* Operating system: Ubuntu 16.04
### Description:
Completion in Zsh uses the older `compctl` builtin, and pip returns completions for long options that take arguments with a trailing equals sign. But compctl/Zsh doesn't understand the trailing equals sign as meaning that the option takes an argument: it adds a space after the equals sign, and if you remove the equals sign it tries to complete the next argument as yet another option.
No idea if this is fixable using the older compctl; it might be worth migrating to the newer compsys... With compsys you will probably have to modify the completion output from Pip or preprocess it in shell code so that it fits what something like `_arguments` expects.
https://github.com/pypa/pip/pull/4842 will make it complete file names by re-implementing that inside pip instead of letting the shell handle it which means certain stuff like colored file names won't work or `zstyle` related settings for file name completion. And it still won't fix the fact that Zsh will add a space after the equals sign...
### What I've run:
```sh
➜ pip install --requirem<tab>
➜ pip install --requirement= # With a space
➜ pip install --requirement=<tab> # Nothing
➜ pip install --requirement= <tab>
➜ pip install --requirement= -- # Sigh...
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/pip/_internal/commands/completion.py`
Content:
```
1 import sys
2 import textwrap
3 from optparse import Values
4 from typing import List
5
6 from pip._internal.cli.base_command import Command
7 from pip._internal.cli.status_codes import SUCCESS
8 from pip._internal.utils.misc import get_prog
9
10 BASE_COMPLETION = """
11 # pip {shell} completion start{script}# pip {shell} completion end
12 """
13
14 COMPLETION_SCRIPTS = {
15 "bash": """
16 _pip_completion()
17 {{
18 COMPREPLY=( $( COMP_WORDS="${{COMP_WORDS[*]}}" \\
19 COMP_CWORD=$COMP_CWORD \\
20 PIP_AUTO_COMPLETE=1 $1 2>/dev/null ) )
21 }}
22 complete -o default -F _pip_completion {prog}
23 """,
24 "zsh": """
25 function _pip_completion {{
26 local words cword
27 read -Ac words
28 read -cn cword
29 reply=( $( COMP_WORDS="$words[*]" \\
30 COMP_CWORD=$(( cword-1 )) \\
31 PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ))
32 }}
33 compctl -K _pip_completion {prog}
34 """,
35 "fish": """
36 function __fish_complete_pip
37 set -lx COMP_WORDS (commandline -o) ""
38 set -lx COMP_CWORD ( \\
39 math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\
40 )
41 set -lx PIP_AUTO_COMPLETE 1
42 string split \\ -- (eval $COMP_WORDS[1])
43 end
44 complete -fa "(__fish_complete_pip)" -c {prog}
45 """,
46 "powershell": """
47 if ((Test-Path Function:\\TabExpansion) -and -not `
48 (Test-Path Function:\\_pip_completeBackup)) {{
49 Rename-Item Function:\\TabExpansion _pip_completeBackup
50 }}
51 function TabExpansion($line, $lastWord) {{
52 $lastBlock = [regex]::Split($line, '[|;]')[-1].TrimStart()
53 if ($lastBlock.StartsWith("{prog} ")) {{
54 $Env:COMP_WORDS=$lastBlock
55 $Env:COMP_CWORD=$lastBlock.Split().Length - 1
56 $Env:PIP_AUTO_COMPLETE=1
57 (& {prog}).Split()
58 Remove-Item Env:COMP_WORDS
59 Remove-Item Env:COMP_CWORD
60 Remove-Item Env:PIP_AUTO_COMPLETE
61 }}
62 elseif (Test-Path Function:\\_pip_completeBackup) {{
63 # Fall back on existing tab expansion
64 _pip_completeBackup $line $lastWord
65 }}
66 }}
67 """,
68 }
69
70
71 class CompletionCommand(Command):
72 """A helper command to be used for command completion."""
73
74 ignore_require_venv = True
75
76 def add_options(self) -> None:
77 self.cmd_opts.add_option(
78 "--bash",
79 "-b",
80 action="store_const",
81 const="bash",
82 dest="shell",
83 help="Emit completion code for bash",
84 )
85 self.cmd_opts.add_option(
86 "--zsh",
87 "-z",
88 action="store_const",
89 const="zsh",
90 dest="shell",
91 help="Emit completion code for zsh",
92 )
93 self.cmd_opts.add_option(
94 "--fish",
95 "-f",
96 action="store_const",
97 const="fish",
98 dest="shell",
99 help="Emit completion code for fish",
100 )
101 self.cmd_opts.add_option(
102 "--powershell",
103 "-p",
104 action="store_const",
105 const="powershell",
106 dest="shell",
107 help="Emit completion code for powershell",
108 )
109
110 self.parser.insert_option_group(0, self.cmd_opts)
111
112 def run(self, options: Values, args: List[str]) -> int:
113 """Prints the completion code of the given shell"""
114 shells = COMPLETION_SCRIPTS.keys()
115 shell_options = ["--" + shell for shell in sorted(shells)]
116 if options.shell in shells:
117 script = textwrap.dedent(
118 COMPLETION_SCRIPTS.get(options.shell, "").format(prog=get_prog())
119 )
120 print(BASE_COMPLETION.format(script=script, shell=options.shell))
121 return SUCCESS
122 else:
123 sys.stderr.write(
124 "ERROR: You must pass {}\n".format(" or ".join(shell_options))
125 )
126 return SUCCESS
127
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/pip/_internal/commands/completion.py b/src/pip/_internal/commands/completion.py
--- a/src/pip/_internal/commands/completion.py
+++ b/src/pip/_internal/commands/completion.py
@@ -22,15 +22,10 @@
complete -o default -F _pip_completion {prog}
""",
"zsh": """
- function _pip_completion {{
- local words cword
- read -Ac words
- read -cn cword
- reply=( $( COMP_WORDS="$words[*]" \\
- COMP_CWORD=$(( cword-1 )) \\
- PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ))
- }}
- compctl -K _pip_completion {prog}
+ #compdef -P pip[0-9.]#
+ compadd $( COMP_WORDS="$words[*]" \\
+ COMP_CWORD=$((CURRENT-1)) \\
+ PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )
""",
"fish": """
function __fish_complete_pip
|
{"golden_diff": "diff --git a/src/pip/_internal/commands/completion.py b/src/pip/_internal/commands/completion.py\n--- a/src/pip/_internal/commands/completion.py\n+++ b/src/pip/_internal/commands/completion.py\n@@ -22,15 +22,10 @@\n complete -o default -F _pip_completion {prog}\n \"\"\",\n \"zsh\": \"\"\"\n- function _pip_completion {{\n- local words cword\n- read -Ac words\n- read -cn cword\n- reply=( $( COMP_WORDS=\"$words[*]\" \\\\\n- COMP_CWORD=$(( cword-1 )) \\\\\n- PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ))\n- }}\n- compctl -K _pip_completion {prog}\n+ #compdef -P pip[0-9.]#\n+ compadd $( COMP_WORDS=\"$words[*]\" \\\\\n+ COMP_CWORD=$((CURRENT-1)) \\\\\n+ PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )\n \"\"\",\n \"fish\": \"\"\"\n function __fish_complete_pip\n", "issue": "Completion in ZSH doesn't understand that arguments follow certain options\n* Pip version: 9.0.1\r\n* Python version: 2.7.12\r\n* Operating system: Ubuntu 16.04\r\n\r\n### Description:\r\n\r\nCompletion in Zsh uses the older `compctl` builtin and returns completions for long options that take arguments with a trailing equals sign. But compctl/Zsh doesn't understand that as meaning that the option takes an argument and adds a space after the equals sign and also tries to complete the next argument if you remove the equals sign as yet another option.\r\n\r\nNo idea if this is fixable using the older compctl, might want to migrate to the newer compsys... With compsys you will probably have to modify the completion output from Pip or preprocess it in shell code so that it fits what something like `_arguments` expects.\r\n\r\nhttps://github.com/pypa/pip/pull/4842 will make it complete file names by re-implementing that inside pip instead of letting the shell handle it which means certain stuff like colored file names won't work or `zstyle` related settings for file name completion. 
And it still won't fix the fact that Zsh will add a space after the equals sign...\r\n\r\n### What I've run:\r\n\r\n```sh\r\n\u279c pip install --requirem<tab>\r\n\u279c pip install --requirement= # With a space\r\n\u279c pip install --requirement=<tab> # Nothing\r\n\u279c pip install --requirement= <tab>\r\n\u279c pip install --requirement= -- # Sigh...\r\n```\r\n\n", "before_files": [{"content": "import sys\nimport textwrap\nfrom optparse import Values\nfrom typing import List\n\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import SUCCESS\nfrom pip._internal.utils.misc import get_prog\n\nBASE_COMPLETION = \"\"\"\n# pip {shell} completion start{script}# pip {shell} completion end\n\"\"\"\n\nCOMPLETION_SCRIPTS = {\n \"bash\": \"\"\"\n _pip_completion()\n {{\n COMPREPLY=( $( COMP_WORDS=\"${{COMP_WORDS[*]}}\" \\\\\n COMP_CWORD=$COMP_CWORD \\\\\n PIP_AUTO_COMPLETE=1 $1 2>/dev/null ) )\n }}\n complete -o default -F _pip_completion {prog}\n \"\"\",\n \"zsh\": \"\"\"\n function _pip_completion {{\n local words cword\n read -Ac words\n read -cn cword\n reply=( $( COMP_WORDS=\"$words[*]\" \\\\\n COMP_CWORD=$(( cword-1 )) \\\\\n PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null ))\n }}\n compctl -K _pip_completion {prog}\n \"\"\",\n \"fish\": \"\"\"\n function __fish_complete_pip\n set -lx COMP_WORDS (commandline -o) \"\"\n set -lx COMP_CWORD ( \\\\\n math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\\\\n )\n set -lx PIP_AUTO_COMPLETE 1\n string split \\\\ -- (eval $COMP_WORDS[1])\n end\n complete -fa \"(__fish_complete_pip)\" -c {prog}\n \"\"\",\n \"powershell\": \"\"\"\n if ((Test-Path Function:\\\\TabExpansion) -and -not `\n (Test-Path Function:\\\\_pip_completeBackup)) {{\n Rename-Item Function:\\\\TabExpansion _pip_completeBackup\n }}\n function TabExpansion($line, $lastWord) {{\n $lastBlock = [regex]::Split($line, '[|;]')[-1].TrimStart()\n if ($lastBlock.StartsWith(\"{prog} \")) {{\n $Env:COMP_WORDS=$lastBlock\n $Env:COMP_CWORD=$lastBlock.Split().Length - 1\n $Env:PIP_AUTO_COMPLETE=1\n (& {prog}).Split()\n Remove-Item Env:COMP_WORDS\n Remove-Item Env:COMP_CWORD\n Remove-Item Env:PIP_AUTO_COMPLETE\n }}\n elseif (Test-Path Function:\\\\_pip_completeBackup) {{\n # Fall back on existing tab expansion\n _pip_completeBackup $line $lastWord\n }}\n }}\n \"\"\",\n}\n\n\nclass CompletionCommand(Command):\n \"\"\"A helper command to be used for command completion.\"\"\"\n\n ignore_require_venv = True\n\n def add_options(self) -> None:\n self.cmd_opts.add_option(\n \"--bash\",\n \"-b\",\n action=\"store_const\",\n const=\"bash\",\n dest=\"shell\",\n help=\"Emit completion code for bash\",\n )\n self.cmd_opts.add_option(\n \"--zsh\",\n \"-z\",\n action=\"store_const\",\n const=\"zsh\",\n dest=\"shell\",\n help=\"Emit completion code for zsh\",\n )\n self.cmd_opts.add_option(\n \"--fish\",\n \"-f\",\n action=\"store_const\",\n const=\"fish\",\n dest=\"shell\",\n help=\"Emit completion code for fish\",\n )\n self.cmd_opts.add_option(\n \"--powershell\",\n \"-p\",\n action=\"store_const\",\n const=\"powershell\",\n dest=\"shell\",\n help=\"Emit completion code for powershell\",\n )\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options: Values, args: List[str]) -> int:\n \"\"\"Prints the completion code of the given shell\"\"\"\n shells = COMPLETION_SCRIPTS.keys()\n shell_options = [\"--\" + shell for shell in sorted(shells)]\n if options.shell in shells:\n script = textwrap.dedent(\n COMPLETION_SCRIPTS.get(options.shell, 
\"\").format(prog=get_prog())\n )\n print(BASE_COMPLETION.format(script=script, shell=options.shell))\n return SUCCESS\n else:\n sys.stderr.write(\n \"ERROR: You must pass {}\\n\".format(\" or \".join(shell_options))\n )\n return SUCCESS\n", "path": "src/pip/_internal/commands/completion.py"}], "after_files": [{"content": "import sys\nimport textwrap\nfrom optparse import Values\nfrom typing import List\n\nfrom pip._internal.cli.base_command import Command\nfrom pip._internal.cli.status_codes import SUCCESS\nfrom pip._internal.utils.misc import get_prog\n\nBASE_COMPLETION = \"\"\"\n# pip {shell} completion start{script}# pip {shell} completion end\n\"\"\"\n\nCOMPLETION_SCRIPTS = {\n \"bash\": \"\"\"\n _pip_completion()\n {{\n COMPREPLY=( $( COMP_WORDS=\"${{COMP_WORDS[*]}}\" \\\\\n COMP_CWORD=$COMP_CWORD \\\\\n PIP_AUTO_COMPLETE=1 $1 2>/dev/null ) )\n }}\n complete -o default -F _pip_completion {prog}\n \"\"\",\n \"zsh\": \"\"\"\n #compdef -P pip[0-9.]#\n compadd $( COMP_WORDS=\"$words[*]\" \\\\\n COMP_CWORD=$((CURRENT-1)) \\\\\n PIP_AUTO_COMPLETE=1 $words[1] 2>/dev/null )\n \"\"\",\n \"fish\": \"\"\"\n function __fish_complete_pip\n set -lx COMP_WORDS (commandline -o) \"\"\n set -lx COMP_CWORD ( \\\\\n math (contains -i -- (commandline -t) $COMP_WORDS)-1 \\\\\n )\n set -lx PIP_AUTO_COMPLETE 1\n string split \\\\ -- (eval $COMP_WORDS[1])\n end\n complete -fa \"(__fish_complete_pip)\" -c {prog}\n \"\"\",\n \"powershell\": \"\"\"\n if ((Test-Path Function:\\\\TabExpansion) -and -not `\n (Test-Path Function:\\\\_pip_completeBackup)) {{\n Rename-Item Function:\\\\TabExpansion _pip_completeBackup\n }}\n function TabExpansion($line, $lastWord) {{\n $lastBlock = [regex]::Split($line, '[|;]')[-1].TrimStart()\n if ($lastBlock.StartsWith(\"{prog} \")) {{\n $Env:COMP_WORDS=$lastBlock\n $Env:COMP_CWORD=$lastBlock.Split().Length - 1\n $Env:PIP_AUTO_COMPLETE=1\n (& {prog}).Split()\n Remove-Item Env:COMP_WORDS\n Remove-Item Env:COMP_CWORD\n Remove-Item Env:PIP_AUTO_COMPLETE\n }}\n elseif (Test-Path Function:\\\\_pip_completeBackup) {{\n # Fall back on existing tab expansion\n _pip_completeBackup $line $lastWord\n }}\n }}\n \"\"\",\n}\n\n\nclass CompletionCommand(Command):\n \"\"\"A helper command to be used for command completion.\"\"\"\n\n ignore_require_venv = True\n\n def add_options(self) -> None:\n self.cmd_opts.add_option(\n \"--bash\",\n \"-b\",\n action=\"store_const\",\n const=\"bash\",\n dest=\"shell\",\n help=\"Emit completion code for bash\",\n )\n self.cmd_opts.add_option(\n \"--zsh\",\n \"-z\",\n action=\"store_const\",\n const=\"zsh\",\n dest=\"shell\",\n help=\"Emit completion code for zsh\",\n )\n self.cmd_opts.add_option(\n \"--fish\",\n \"-f\",\n action=\"store_const\",\n const=\"fish\",\n dest=\"shell\",\n help=\"Emit completion code for fish\",\n )\n self.cmd_opts.add_option(\n \"--powershell\",\n \"-p\",\n action=\"store_const\",\n const=\"powershell\",\n dest=\"shell\",\n help=\"Emit completion code for powershell\",\n )\n\n self.parser.insert_option_group(0, self.cmd_opts)\n\n def run(self, options: Values, args: List[str]) -> int:\n \"\"\"Prints the completion code of the given shell\"\"\"\n shells = COMPLETION_SCRIPTS.keys()\n shell_options = [\"--\" + shell for shell in sorted(shells)]\n if options.shell in shells:\n script = textwrap.dedent(\n COMPLETION_SCRIPTS.get(options.shell, \"\").format(prog=get_prog())\n )\n print(BASE_COMPLETION.format(script=script, shell=options.shell))\n return SUCCESS\n else:\n sys.stderr.write(\n \"ERROR: You must pass {}\\n\".format(\" or 
\".join(shell_options))\n )\n return SUCCESS\n", "path": "src/pip/_internal/commands/completion.py"}]}
| 1,793 | 247 |
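The bash, fish and (after the patch above) zsh snippets emitted by `pip completion` all delegate to the same mechanism: pip itself prints completion candidates when invoked with `PIP_AUTO_COMPLETE=1` plus `COMP_WORDS`/`COMP_CWORD` in its environment. A rough way to poke at that protocol directly from Python is sketched below; the example command line and the expected candidate are assumptions, and the exact output will vary with the installed pip version.

```python
# Hedged sketch: querying pip's completion protocol directly, the same way the
# shell snippets above do. Requires pip on PATH; the candidates printed depend
# on the installed pip version.
import os
import subprocess

env = dict(
    os.environ,
    PIP_AUTO_COMPLETE="1",            # tells pip to emit completions instead of running
    COMP_WORDS="pip install --requ",  # the command line typed so far (assumed example)
    COMP_CWORD="2",                   # index of the word being completed
)
result = subprocess.run(["pip"], env=env, capture_output=True, text=True)
print(result.stdout.split())
# e.g. ['--requirement='] -- note the trailing '=', which is what the old
# compctl-based zsh snippet could not interpret as "this option takes a value".
```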
gh_patches_debug_27845
|
rasdani/github-patches
|
git_diff
|
Lightning-AI__torchmetrics-2001
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ClipScore errors on captions with more than 77 tokens
## 🐛 Bug
If you run [CLIPScore](https://torchmetrics.readthedocs.io/en/stable/multimodal/clip_score.html) between an image and a caption, where the caption has more than 77 tokens (longer than the max string that CLIP can process) -- the clip score errors.
### To Reproduce
Compute CLIPScore between a caption with 77+ tokens and an image.
<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->
<details>
<summary>Code sample</summary>
```
metric = CLIPScore(model_name_or_path="openai/clip-vit-base-patch32")
metric.to('cuda')
clip_score = metric(image_tensor, caption)
```
```
Traceback (most recent call last):
File "/usr/lib/python3.8/runpy.py", line 194, in _run_module_as_main
return _run_code(code, main_globals, None,
File "/usr/lib/python3.8/runpy.py", line 87, in _run_code
exec(code, run_globals)
File "/home/user/scripts/compute_clip_scores.py", line 125, in <module>
compute_clip_scores(response=response,
File "/home/user/scripts/compute_clip_scores.py", line 87, in compute_clip_scores
clip_score = metric(image_tensor, caption)
File "/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py", line 288, in forward
self._forward_cache = self._forward_full_state_update(*args, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py", line 302, in _forward_full_state_update
self.update(*args, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py", line 456, in wrapped_func
raise err
File "/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py", line 446, in wrapped_func
update(*args, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/torchmetrics/multimodal/clip_score.py", line 123, in update
score, n_samples = _clip_score_update(images, text, self.model, self.processor)
File "/home/user/.local/lib/python3.8/site-packages/torchmetrics/functional/multimodal/clip_score.py", line 69, in _clip_score_update
txt_features = model.get_text_features(
File "/home/user/.local/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py", line 1017, in get_text_features
text_outputs = self.text_model(
File "/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py", line 730, in forward
hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
File "/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/home/user/.local/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py", line 230, in forward
embeddings = inputs_embeds + position_embeddings
RuntimeError: The size of tensor a (138) must match the size of tensor b (77) at non-singleton dimension 1
```
<!-- Ideally attach a minimal code sample to reproduce the decried issue.
Minimal means having the shortest code but still preserving the bug. -->
</details>
### Expected behavior
Present a warning to the user and truncate the caption so that the metric can be computed on the first 77 tokens of the provided caption
### Environment
- TorchMetrics version (and how you installed TM, e.g. `conda`, `pip`, build from source): **1.0.3, pip**
- Python & PyTorch Version (e.g., 1.0): **Python 3.8.10, PyTorch 2.0.1+cu118**
- Any other relevant information such as OS (e.g., Linux): **Linux**
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/torchmetrics/functional/multimodal/clip_score.py`
Content:
```
1 # Copyright The Lightning team.
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14 from typing import List, Tuple, Union
15
16 import torch
17 from torch import Tensor
18 from typing_extensions import Literal
19
20 from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout
21 from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_10
22
23 if _TRANSFORMERS_GREATER_EQUAL_4_10:
24 from transformers import CLIPModel as _CLIPModel
25 from transformers import CLIPProcessor as _CLIPProcessor
26
27 def _download_clip() -> None:
28 _CLIPModel.from_pretrained("openai/clip-vit-large-patch14")
29 _CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14")
30
31 if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_clip):
32 __doctest_skip__ = ["clip_score"]
33
34 else:
35 __doctest_skip__ = ["clip_score"]
36 _CLIPModel = None
37 _CLIPProcessor = None
38
39
40 def _clip_score_update(
41 images: Union[Tensor, List[Tensor]],
42 text: Union[str, List[str]],
43 model: _CLIPModel,
44 processor: _CLIPProcessor,
45 ) -> Tuple[Tensor, int]:
46 if not isinstance(images, list):
47 if images.ndim == 3:
48 images = [images]
49 else: # unwrap into list
50 images = list(images)
51
52 if not all(i.ndim == 3 for i in images):
53 raise ValueError("Expected all images to be 3d but found image that has either more or less")
54
55 if not isinstance(text, list):
56 text = [text]
57
58 if len(text) != len(images):
59 raise ValueError(
60 f"Expected the number of images and text examples to be the same but got {len(images)} and {len(text)}"
61 )
62 device = images[0].device
63 processed_input = processor(text=text, images=[i.cpu() for i in images], return_tensors="pt", padding=True)
64
65 img_features = model.get_image_features(processed_input["pixel_values"].to(device))
66 img_features = img_features / img_features.norm(p=2, dim=-1, keepdim=True)
67
68 txt_features = model.get_text_features(
69 processed_input["input_ids"].to(device), processed_input["attention_mask"].to(device)
70 )
71 txt_features = txt_features / txt_features.norm(p=2, dim=-1, keepdim=True)
72
73 # cosine similarity between feature vectors
74 score = 100 * (img_features * txt_features).sum(axis=-1)
75 return score, len(text)
76
77
78 def _get_model_and_processor(
79 model_name_or_path: Literal[
80 "openai/clip-vit-base-patch16",
81 "openai/clip-vit-base-patch32",
82 "openai/clip-vit-large-patch14-336",
83 "openai/clip-vit-large-patch14",
84 ] = "openai/clip-vit-large-patch14",
85 ) -> Tuple[_CLIPModel, _CLIPProcessor]:
86 if _TRANSFORMERS_GREATER_EQUAL_4_10:
87 model = _CLIPModel.from_pretrained(model_name_or_path)
88 processor = _CLIPProcessor.from_pretrained(model_name_or_path)
89 return model, processor
90
91 raise ModuleNotFoundError(
92 "`clip_score` metric requires `transformers` package be installed."
93 " Either install with `pip install transformers>=4.10.0` or `pip install torchmetrics[multimodal]`."
94 )
95
96
97 def clip_score(
98 images: Union[Tensor, List[Tensor]],
99 text: Union[str, List[str]],
100 model_name_or_path: Literal[
101 "openai/clip-vit-base-patch16",
102 "openai/clip-vit-base-patch32",
103 "openai/clip-vit-large-patch14-336",
104 "openai/clip-vit-large-patch14",
105 ] = "openai/clip-vit-large-patch14",
106 ) -> Tensor:
107 r"""Calculate `CLIP Score`_ which is a text-to-image similarity metric.
108
109 CLIP is a reference free metric that can be used to evaluate the correlation between a generated caption for an
110 image and the actual content of the image. It has been found to be highly correlated with human judgement. The
111 metric is defined as:
112
113 .. math::
114 \text{CLIPScore(I, C)} = max(100 * cos(E_I, E_C), 0)
115
116 which corresponds to the cosine similarity between visual CLIP embedding :math:`E_i` for an image :math:`i` and
117 textual CLIP embedding :math:`E_C` for an caption :math:`C`. The score is bound between 0 and 100 and the closer
118 to 100 the better.
119
120 .. note:: Metric is not scriptable
121
122 Args:
123 images: Either a single [N, C, H, W] tensor or a list of [C, H, W] tensors
124 text: Either a single caption or a list of captions
125 model_name_or_path: string indicating the version of the CLIP model to use. Available models are
126 `"openai/clip-vit-base-patch16"`, `"openai/clip-vit-base-patch32"`, `"openai/clip-vit-large-patch14-336"`
127 and `"openai/clip-vit-large-patch14"`,
128
129 Raises:
130 ModuleNotFoundError:
131 If transformers package is not installed or version is lower than 4.10.0
132 ValueError:
133 If not all images have format [C, H, W]
134 ValueError:
135 If the number of images and captions do not match
136
137 Example:
138 >>> import torch
139 >>> _ = torch.manual_seed(42)
140 >>> from torchmetrics.functional.multimodal import clip_score
141 >>> score = clip_score(torch.randint(255, (3, 224, 224)), "a photo of a cat", "openai/clip-vit-base-patch16")
142 >>> print(score.detach())
143 tensor(24.4255)
144
145 """
146 model, processor = _get_model_and_processor(model_name_or_path)
147 device = images.device if isinstance(images, Tensor) else images[0].device
148 score, _ = _clip_score_update(images, text, model.to(device), processor)
149 score = score.mean(0)
150 return torch.max(score, torch.zeros_like(score))
151
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/src/torchmetrics/functional/multimodal/clip_score.py b/src/torchmetrics/functional/multimodal/clip_score.py
--- a/src/torchmetrics/functional/multimodal/clip_score.py
+++ b/src/torchmetrics/functional/multimodal/clip_score.py
@@ -17,6 +17,7 @@
from torch import Tensor
from typing_extensions import Literal
+from torchmetrics.utilities import rank_zero_warn
from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout
from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_10
@@ -65,6 +66,17 @@
img_features = model.get_image_features(processed_input["pixel_values"].to(device))
img_features = img_features / img_features.norm(p=2, dim=-1, keepdim=True)
+ max_position_embeddings = model.config.text_config.max_position_embeddings
+ if processed_input["attention_mask"].shape[-1] > max_position_embeddings:
+ rank_zero_warn(
+ f"Encountered caption longer than {max_position_embeddings=}. Will truncate captions to this length."
+ "If longer captions are needed, initialize argument `model_name_or_path` with a model that supports"
+ "longer sequences",
+ UserWarning,
+ )
+ processed_input["attention_mask"] = processed_input["attention_mask"][..., :max_position_embeddings]
+ processed_input["input_ids"] = processed_input["input_ids"][..., :max_position_embeddings]
+
txt_features = model.get_text_features(
processed_input["input_ids"].to(device), processed_input["attention_mask"].to(device)
)
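To make the effect of this hunk concrete, here is a self-contained sketch of the truncation guard using plain tensors in place of the real CLIP processor and model config. The 77-position limit and the 138-token caption mirror the numbers in the traceback above; everything else is an illustrative stand-in, not the torchmetrics implementation itself.

```python
# Illustration of the truncation guard added above, using plain tensors
# instead of a real CLIP processor/model (values mirror the traceback:
# a 138-token caption vs. a 77-position text encoder).
import warnings
import torch

max_position_embeddings = 77  # stand-in for model.config.text_config.max_position_embeddings
processed_input = {
    "input_ids": torch.randint(0, 1000, (1, 138)),         # fake tokenized caption
    "attention_mask": torch.ones(1, 138, dtype=torch.long),
}

if processed_input["attention_mask"].shape[-1] > max_position_embeddings:
    warnings.warn(
        f"Encountered caption longer than max_position_embeddings={max_position_embeddings}. "
        "Truncating captions to this length.",
        UserWarning,
    )
    processed_input["attention_mask"] = processed_input["attention_mask"][..., :max_position_embeddings]
    processed_input["input_ids"] = processed_input["input_ids"][..., :max_position_embeddings]

print(processed_input["input_ids"].shape)       # torch.Size([1, 77])
print(processed_input["attention_mask"].shape)  # torch.Size([1, 77])
```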
|
{"golden_diff": "diff --git a/src/torchmetrics/functional/multimodal/clip_score.py b/src/torchmetrics/functional/multimodal/clip_score.py\n--- a/src/torchmetrics/functional/multimodal/clip_score.py\n+++ b/src/torchmetrics/functional/multimodal/clip_score.py\n@@ -17,6 +17,7 @@\n from torch import Tensor\n from typing_extensions import Literal\n \n+from torchmetrics.utilities import rank_zero_warn\n from torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout\n from torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_10\n \n@@ -65,6 +66,17 @@\n img_features = model.get_image_features(processed_input[\"pixel_values\"].to(device))\n img_features = img_features / img_features.norm(p=2, dim=-1, keepdim=True)\n \n+ max_position_embeddings = model.config.text_config.max_position_embeddings\n+ if processed_input[\"attention_mask\"].shape[-1] > max_position_embeddings:\n+ rank_zero_warn(\n+ f\"Encountered caption longer than {max_position_embeddings=}. Will truncate captions to this length.\"\n+ \"If longer captions are needed, initialize argument `model_name_or_path` with a model that supports\"\n+ \"longer sequences\",\n+ UserWarning,\n+ )\n+ processed_input[\"attention_mask\"] = processed_input[\"attention_mask\"][..., :max_position_embeddings]\n+ processed_input[\"input_ids\"] = processed_input[\"input_ids\"][..., :max_position_embeddings]\n+\n txt_features = model.get_text_features(\n processed_input[\"input_ids\"].to(device), processed_input[\"attention_mask\"].to(device)\n )\n", "issue": "ClipScore errors on captions with more than 77 tokens\n## \ud83d\udc1b Bug\r\n\r\nIf you run [CLIPScore](https://torchmetrics.readthedocs.io/en/stable/multimodal/clip_score.html) between an image and a caption, where the caption has more than 77 tokens (longer than the max string than CLIP can process) -- the clip score errors.\r\n\r\n### To Reproduce\r\nCompute CLIPScore between a caption with 77+ tokens and an image.\r\n\r\n<!-- If you have a code sample, error messages, stack traces, please provide it here as well -->\r\n\r\n<details>\r\n <summary>Code sample</summary>\r\n\r\n```\r\nmetric = CLIPScore(model_name_or_path=\"openai/clip-vit-base-patch32\")\r\nmetric.to('cuda')\r\nclip_score = metric(image_tensor, caption)\r\n```\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"/usr/lib/python3.8/runpy.py\", line 194, in _run_module_as_main\r\n return _run_code(code, main_globals, None,\r\n File \"/usr/lib/python3.8/runpy.py\", line 87, in _run_code\r\n exec(code, run_globals)\r\n File \"/home/user/scripts/compute_clip_scores.py\", line 125, in <module>\r\n compute_clip_scores(response=response,\r\n File \"/home/user/scripts/compute_clip_scores.py\", line 87, in compute_clip_scores\r\n clip_score = metric(image_tensor, caption)\r\n File \"/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py\", line 1501, in _call_impl\r\n return forward_call(*args, **kwargs)\r\n File \"/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py\", line 288, in forward\r\n self._forward_cache = self._forward_full_state_update(*args, **kwargs)\r\n File \"/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py\", line 302, in _forward_full_state_update\r\n self.update(*args, **kwargs)\r\n File \"/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py\", line 456, in wrapped_func\r\n raise err\r\n File \"/home/user/.local/lib/python3.8/site-packages/torchmetrics/metric.py\", line 446, in wrapped_func\r\n update(*args, 
**kwargs)\r\n File \"/home/user/.local/lib/python3.8/site-packages/torchmetrics/multimodal/clip_score.py\", line 123, in update\r\n score, n_samples = _clip_score_update(images, text, self.model, self.processor)\r\n File \"/home/user/.local/lib/python3.8/site-packages/torchmetrics/functional/multimodal/clip_score.py\", line 69, in _clip_score_update\r\n txt_features = model.get_text_features(\r\n File \"/home/user/.local/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py\", line 1017, in get_text_features\r\n text_outputs = self.text_model(\r\n File \"/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py\", line 1501, in _call_impl\r\n return forward_call(*args, **kwargs)\r\n File \"/home/user/.local/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py\", line 730, in forward\r\n hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)\r\n File \"/home/user/.local/lib/python3.8/site-packages/torch/nn/modules/module.py\", line 1501, in _call_impl\r\n return forward_call(*args, **kwargs)\r\n File \"/home/user/.local/lib/python3.8/site-packages/transformers/models/clip/modeling_clip.py\", line 230, in forward\r\n embeddings = inputs_embeds + position_embeddings\r\nRuntimeError: The size of tensor a (138) must match the size of tensor b (77) at non-singleton dimension 1\r\n```\r\n\r\n\r\n<!-- Ideally attach a minimal code sample to reproduce the decried issue.\r\nMinimal means having the shortest code but still preserving the bug. -->\r\n\r\n</details>\r\n\r\n### Expected behavior\r\n\r\nPresent a warning to the user and truncate the caption so that the metric can be computed on the first 77 tokens of the provided caption\r\n\r\n### Environment\r\n\r\n- TorchMetrics version (and how you installed TM, e.g. 
`conda`, `pip`, build from source): **1.0.3, pip**\r\n- Python & PyTorch Version (e.g., 1.0): **Python 3.8.10, PyTorch 2.0.1+cu118**\r\n- Any other relevant information such as OS (e.g., Linux): **Linux**\r\n\n", "before_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import List, Tuple, Union\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout\nfrom torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_10\n\nif _TRANSFORMERS_GREATER_EQUAL_4_10:\n from transformers import CLIPModel as _CLIPModel\n from transformers import CLIPProcessor as _CLIPProcessor\n\n def _download_clip() -> None:\n _CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n _CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\")\n\n if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_clip):\n __doctest_skip__ = [\"clip_score\"]\n\nelse:\n __doctest_skip__ = [\"clip_score\"]\n _CLIPModel = None\n _CLIPProcessor = None\n\n\ndef _clip_score_update(\n images: Union[Tensor, List[Tensor]],\n text: Union[str, List[str]],\n model: _CLIPModel,\n processor: _CLIPProcessor,\n) -> Tuple[Tensor, int]:\n if not isinstance(images, list):\n if images.ndim == 3:\n images = [images]\n else: # unwrap into list\n images = list(images)\n\n if not all(i.ndim == 3 for i in images):\n raise ValueError(\"Expected all images to be 3d but found image that has either more or less\")\n\n if not isinstance(text, list):\n text = [text]\n\n if len(text) != len(images):\n raise ValueError(\n f\"Expected the number of images and text examples to be the same but got {len(images)} and {len(text)}\"\n )\n device = images[0].device\n processed_input = processor(text=text, images=[i.cpu() for i in images], return_tensors=\"pt\", padding=True)\n\n img_features = model.get_image_features(processed_input[\"pixel_values\"].to(device))\n img_features = img_features / img_features.norm(p=2, dim=-1, keepdim=True)\n\n txt_features = model.get_text_features(\n processed_input[\"input_ids\"].to(device), processed_input[\"attention_mask\"].to(device)\n )\n txt_features = txt_features / txt_features.norm(p=2, dim=-1, keepdim=True)\n\n # cosine similarity between feature vectors\n score = 100 * (img_features * txt_features).sum(axis=-1)\n return score, len(text)\n\n\ndef _get_model_and_processor(\n model_name_or_path: Literal[\n \"openai/clip-vit-base-patch16\",\n \"openai/clip-vit-base-patch32\",\n \"openai/clip-vit-large-patch14-336\",\n \"openai/clip-vit-large-patch14\",\n ] = \"openai/clip-vit-large-patch14\",\n) -> Tuple[_CLIPModel, _CLIPProcessor]:\n if _TRANSFORMERS_GREATER_EQUAL_4_10:\n model = _CLIPModel.from_pretrained(model_name_or_path)\n processor = _CLIPProcessor.from_pretrained(model_name_or_path)\n return model, processor\n\n raise ModuleNotFoundError(\n \"`clip_score` metric requires `transformers` package be installed.\"\n 
\" Either install with `pip install transformers>=4.10.0` or `pip install torchmetrics[multimodal]`.\"\n )\n\n\ndef clip_score(\n images: Union[Tensor, List[Tensor]],\n text: Union[str, List[str]],\n model_name_or_path: Literal[\n \"openai/clip-vit-base-patch16\",\n \"openai/clip-vit-base-patch32\",\n \"openai/clip-vit-large-patch14-336\",\n \"openai/clip-vit-large-patch14\",\n ] = \"openai/clip-vit-large-patch14\",\n) -> Tensor:\n r\"\"\"Calculate `CLIP Score`_ which is a text-to-image similarity metric.\n\n CLIP is a reference free metric that can be used to evaluate the correlation between a generated caption for an\n image and the actual content of the image. It has been found to be highly correlated with human judgement. The\n metric is defined as:\n\n .. math::\n \\text{CLIPScore(I, C)} = max(100 * cos(E_I, E_C), 0)\n\n which corresponds to the cosine similarity between visual CLIP embedding :math:`E_i` for an image :math:`i` and\n textual CLIP embedding :math:`E_C` for an caption :math:`C`. The score is bound between 0 and 100 and the closer\n to 100 the better.\n\n .. note:: Metric is not scriptable\n\n Args:\n images: Either a single [N, C, H, W] tensor or a list of [C, H, W] tensors\n text: Either a single caption or a list of captions\n model_name_or_path: string indicating the version of the CLIP model to use. Available models are\n `\"openai/clip-vit-base-patch16\"`, `\"openai/clip-vit-base-patch32\"`, `\"openai/clip-vit-large-patch14-336\"`\n and `\"openai/clip-vit-large-patch14\"`,\n\n Raises:\n ModuleNotFoundError:\n If transformers package is not installed or version is lower than 4.10.0\n ValueError:\n If not all images have format [C, H, W]\n ValueError:\n If the number of images and captions do not match\n\n Example:\n >>> import torch\n >>> _ = torch.manual_seed(42)\n >>> from torchmetrics.functional.multimodal import clip_score\n >>> score = clip_score(torch.randint(255, (3, 224, 224)), \"a photo of a cat\", \"openai/clip-vit-base-patch16\")\n >>> print(score.detach())\n tensor(24.4255)\n\n \"\"\"\n model, processor = _get_model_and_processor(model_name_or_path)\n device = images.device if isinstance(images, Tensor) else images[0].device\n score, _ = _clip_score_update(images, text, model.to(device), processor)\n score = score.mean(0)\n return torch.max(score, torch.zeros_like(score))\n", "path": "src/torchmetrics/functional/multimodal/clip_score.py"}], "after_files": [{"content": "# Copyright The Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom typing import List, Tuple, Union\n\nimport torch\nfrom torch import Tensor\nfrom typing_extensions import Literal\n\nfrom torchmetrics.utilities import rank_zero_warn\nfrom torchmetrics.utilities.checks import _SKIP_SLOW_DOCTEST, _try_proceed_with_timeout\nfrom torchmetrics.utilities.imports import _TRANSFORMERS_GREATER_EQUAL_4_10\n\nif _TRANSFORMERS_GREATER_EQUAL_4_10:\n from transformers import CLIPModel as _CLIPModel\n from transformers import CLIPProcessor as _CLIPProcessor\n\n def _download_clip() -> 
None:\n _CLIPModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n _CLIPProcessor.from_pretrained(\"openai/clip-vit-large-patch14\")\n\n if _SKIP_SLOW_DOCTEST and not _try_proceed_with_timeout(_download_clip):\n __doctest_skip__ = [\"clip_score\"]\n\nelse:\n __doctest_skip__ = [\"clip_score\"]\n _CLIPModel = None\n _CLIPProcessor = None\n\n\ndef _clip_score_update(\n images: Union[Tensor, List[Tensor]],\n text: Union[str, List[str]],\n model: _CLIPModel,\n processor: _CLIPProcessor,\n) -> Tuple[Tensor, int]:\n if not isinstance(images, list):\n if images.ndim == 3:\n images = [images]\n else: # unwrap into list\n images = list(images)\n\n if not all(i.ndim == 3 for i in images):\n raise ValueError(\"Expected all images to be 3d but found image that has either more or less\")\n\n if not isinstance(text, list):\n text = [text]\n\n if len(text) != len(images):\n raise ValueError(\n f\"Expected the number of images and text examples to be the same but got {len(images)} and {len(text)}\"\n )\n device = images[0].device\n processed_input = processor(text=text, images=[i.cpu() for i in images], return_tensors=\"pt\", padding=True)\n\n img_features = model.get_image_features(processed_input[\"pixel_values\"].to(device))\n img_features = img_features / img_features.norm(p=2, dim=-1, keepdim=True)\n\n max_position_embeddings = model.config.text_config.max_position_embeddings\n if processed_input[\"attention_mask\"].shape[-1] > max_position_embeddings:\n rank_zero_warn(\n f\"Encountered caption longer than {max_position_embeddings=}. Will truncate captions to this length.\"\n \"If longer captions are needed, initialize argument `model_name_or_path` with a model that supports\"\n \"longer sequences\",\n UserWarning,\n )\n processed_input[\"attention_mask\"] = processed_input[\"attention_mask\"][..., :max_position_embeddings]\n processed_input[\"input_ids\"] = processed_input[\"input_ids\"][..., :max_position_embeddings]\n\n txt_features = model.get_text_features(\n processed_input[\"input_ids\"].to(device), processed_input[\"attention_mask\"].to(device)\n )\n txt_features = txt_features / txt_features.norm(p=2, dim=-1, keepdim=True)\n\n # cosine similarity between feature vectors\n score = 100 * (img_features * txt_features).sum(axis=-1)\n return score, len(text)\n\n\ndef _get_model_and_processor(\n model_name_or_path: Literal[\n \"openai/clip-vit-base-patch16\",\n \"openai/clip-vit-base-patch32\",\n \"openai/clip-vit-large-patch14-336\",\n \"openai/clip-vit-large-patch14\",\n ] = \"openai/clip-vit-large-patch14\",\n) -> Tuple[_CLIPModel, _CLIPProcessor]:\n if _TRANSFORMERS_GREATER_EQUAL_4_10:\n model = _CLIPModel.from_pretrained(model_name_or_path)\n processor = _CLIPProcessor.from_pretrained(model_name_or_path)\n return model, processor\n\n raise ModuleNotFoundError(\n \"`clip_score` metric requires `transformers` package be installed.\"\n \" Either install with `pip install transformers>=4.10.0` or `pip install torchmetrics[multimodal]`.\"\n )\n\n\ndef clip_score(\n images: Union[Tensor, List[Tensor]],\n text: Union[str, List[str]],\n model_name_or_path: Literal[\n \"openai/clip-vit-base-patch16\",\n \"openai/clip-vit-base-patch32\",\n \"openai/clip-vit-large-patch14-336\",\n \"openai/clip-vit-large-patch14\",\n ] = \"openai/clip-vit-large-patch14\",\n) -> Tensor:\n r\"\"\"Calculate `CLIP Score`_ which is a text-to-image similarity metric.\n\n CLIP is a reference free metric that can be used to evaluate the correlation between a generated caption for an\n image and the actual content of the 
image. It has been found to be highly correlated with human judgement. The\n metric is defined as:\n\n .. math::\n \\text{CLIPScore(I, C)} = max(100 * cos(E_I, E_C), 0)\n\n which corresponds to the cosine similarity between visual CLIP embedding :math:`E_i` for an image :math:`i` and\n textual CLIP embedding :math:`E_C` for an caption :math:`C`. The score is bound between 0 and 100 and the closer\n to 100 the better.\n\n .. note:: Metric is not scriptable\n\n Args:\n images: Either a single [N, C, H, W] tensor or a list of [C, H, W] tensors\n text: Either a single caption or a list of captions\n model_name_or_path: string indicating the version of the CLIP model to use. Available models are\n `\"openai/clip-vit-base-patch16\"`, `\"openai/clip-vit-base-patch32\"`, `\"openai/clip-vit-large-patch14-336\"`\n and `\"openai/clip-vit-large-patch14\"`,\n\n Raises:\n ModuleNotFoundError:\n If transformers package is not installed or version is lower than 4.10.0\n ValueError:\n If not all images have format [C, H, W]\n ValueError:\n If the number of images and captions do not match\n\n Example:\n >>> import torch\n >>> _ = torch.manual_seed(42)\n >>> from torchmetrics.functional.multimodal import clip_score\n >>> score = clip_score(torch.randint(255, (3, 224, 224)), \"a photo of a cat\", \"openai/clip-vit-base-patch16\")\n >>> print(score.detach())\n tensor(24.4255)\n\n \"\"\"\n model, processor = _get_model_and_processor(model_name_or_path)\n device = images.device if isinstance(images, Tensor) else images[0].device\n score, _ = _clip_score_update(images, text, model.to(device), processor)\n score = score.mean(0)\n return torch.max(score, torch.zeros_like(score))\n", "path": "src/torchmetrics/functional/multimodal/clip_score.py"}]}
| 3,236 | 369 |
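The fix recorded in the entry above guards torchmetrics' CLIP score against captions longer than the text encoder's positional limit by truncating `input_ids` and `attention_mask` before calling `get_text_features`. Below is a minimal sketch of that guard in isolation, assuming a Hugging Face `CLIPModel`/`CLIPProcessor` pair; the helper name `truncate_clip_text_inputs` is illustrative and not part of either library.

```python
import warnings

from transformers import CLIPModel, CLIPProcessor


def truncate_clip_text_inputs(inputs, max_positions):
    """Clip input_ids / attention_mask to the text encoder's positional limit (usually 77)."""
    if inputs["attention_mask"].shape[-1] > max_positions:
        warnings.warn(f"Caption longer than {max_positions} tokens; truncating.", UserWarning)
        inputs["attention_mask"] = inputs["attention_mask"][..., :max_positions]
        inputs["input_ids"] = inputs["input_ids"][..., :max_positions]
    return inputs


model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# A deliberately over-long caption that would otherwise crash the position embedding add.
text_inputs = processor(text=["a photo of a cat " * 40], return_tensors="pt", padding=True)
text_inputs = truncate_clip_text_inputs(text_inputs, model.config.text_config.max_position_embeddings)
features = model.get_text_features(text_inputs["input_ids"], text_inputs["attention_mask"])
```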
gh_patches_debug_11566
|
rasdani/github-patches
|
git_diff
|
DDMAL__CantusDB-759
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Hide "Edit chants (Fulltext & Volpiano editor)" link from my sources sidebar on flatpages
To prevent encountering a 404 error, we should hide the link from the "My Sources" sidebar on flatpages when the corresponding source has no chants.
--- END ISSUE ---
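A minimal sketch of the guard this issue asks for, shown only to illustrate the intent; the actual patch appears in the golden diff further down this entry. It assumes the repository's Django models, where `Source` exposes a reverse `chant_set` relation, and the helper name below is illustrative rather than taken from the codebase.

```python
from django.urls import reverse


def make_edit_chants_link_or_blank(source) -> str:
    # Only link to the chant editor when the source actually has chants;
    # otherwise the editor view has nothing to load and answers with a 404.
    if not source.chant_set.exists():
        return ""
    url = reverse("source-edit-chants", args=[source.id])
    return f'<a href="{url}">Edit chants (Fulltext & Volpiano editor)</a>'
```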
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `django/cantusdb_project/main_app/templatetags/helper_tags.py`
Content:
```
1 import calendar
2 from typing import Union, Optional
3 from django import template
4 from main_app.models import Source
5 from articles.models import Article
6 from django.utils.safestring import mark_safe
7 from django.urls import reverse
8 from django.core.paginator import Paginator
9
10
11 register = template.Library()
12
13
14 @register.simple_tag(takes_context=False)
15 def recent_articles():
16 """
17 Generates a html unordered list of recent articles for display on the homepage
18
19 Used in:
20 templates/flatpages/default.html
21 """
22 articles = Article.objects.order_by("-date_created")[:5]
23 list_item_template = '<li style="padding-bottom: 0.5em;"><a href="{url}">{title}</a><br><small>{date}</small></li>'
24 list_items = [
25 list_item_template.format(
26 url=a.get_absolute_url(),
27 title=a.title,
28 date=a.date_created.strftime("%A %B %-d, %Y"),
29 )
30 for a in articles
31 ]
32 list_items_string = "".join(list_items)
33 recent_articles_string = "<ul>{lis}</ul>".format(lis=list_items_string)
34 return mark_safe(recent_articles_string)
35
36
37 @register.simple_tag(takes_context=False)
38 def my_sources(user):
39 """
40 Generates a html unordered list of sources the currently logged-in user has access to edit, for display on the homepage
41
42 Used in:
43 templates/flatpages/default.html
44 """
45
46 def make_source_detail_link_with_siglum(source):
47 id = source.id
48 siglum = source.rism_siglum
49 url = reverse("source-detail", args=[id])
50 link = '<a href="{}">{}</a>'.format(url, siglum)
51 return link
52
53 def make_source_detail_link_with_title(source):
54 id = source.id
55 title = source.title
56 url = reverse("source-detail", args=[id])
57 link = '<a href="{}">{}</a>'.format(url, title)
58 return link
59
60 def make_add_new_chants_link(source):
61 id = source.id
62 url = reverse("chant-create", args=[id])
63 link = '<a href="{}">+ Add new chant</a>'.format(url)
64 return link
65
66 def make_edit_chants_link(source):
67 id = source.id
68 url = reverse("source-edit-chants", args=[id])
69 link = '<a href="{}">Edit chants (Fulltext & Volpiano editor)</a>'.format(url)
70 return link
71
72 def make_links_for_source(source):
73 link_with_siglum = make_source_detail_link_with_siglum(source)
74 link_with_title = make_source_detail_link_with_title(source)
75 add_new_chants_link = make_add_new_chants_link(source)
76 edit_chants_link = make_edit_chants_link(source)
77 template = """{sigl}<br>
78 <small>
79 <b>{title}</b><br>
80 {add}<br>
81 {edit}<br>
82 </small>
83 """
84 links_string = template.format(
85 sigl=link_with_siglum,
86 title=link_with_title,
87 add=add_new_chants_link,
88 edit=edit_chants_link,
89 )
90 return links_string
91
92 MAX_SOURCES_TO_DISPLAY = 6
93 sources = list(user.sources_user_can_edit.all())[:MAX_SOURCES_TO_DISPLAY]
94 source_links = [make_links_for_source(source) for source in sources]
95 list_items = ["<li>{}</li>".format(link) for link in source_links]
96 joined_list_items = "".join(list_items)
97 links_ul = "<ul>{}</ul>".format(joined_list_items)
98 return mark_safe(links_ul)
99
100
101 @register.filter(name="month_to_string")
102 def month_to_string(value: Optional[Union[str, int]]) -> Optional[Union[str, int]]:
103 """
104 Converts month number to textual representation, 3 letters (Jan, Mar, etc)
105
106 used in:
107 main_app/templates/feast_detail.html
108 main_app/templates/feast_list.html
109 """
110 if type(value) == int and value in range(1, 13):
111 return calendar.month_abbr[value]
112 else:
113 return value
114
115
116 @register.simple_tag(takes_context=True)
117 def url_add_get_params(context, **kwargs):
118 """
119 accounts for the situations where there may be two paginations in one page
120
121 Used in:
122 main_app/templates/pagination.html
123 main_app/templates/user_source_list.html
124 """
125 query = context["request"].GET.copy()
126 if "page" in kwargs:
127 query.pop("page", None)
128 if "page2" in kwargs:
129 query.pop("page2", None)
130 query.update(kwargs)
131 return query.urlencode()
132
133
134 @register.simple_tag(takes_context=False)
135 def source_links():
136 """
137 Generates a series of html option tags linking to sources in Cantus Dabase, for display on the homepage
138
139 Used in:
140 templates/flatpages/default.html
141 """
142 sources = (
143 Source.objects.filter(published=True, segment__id=4063)
144 .exclude(siglum=None)
145 .values("siglum", "id")
146 .order_by("siglum")
147 )
148 options = ""
149 for source in sources:
150 option_str = (
151 f"<option value=source/{source['id']}>{source['siglum']}</option>\n"
152 )
153 options += option_str
154
155 return mark_safe(options)
156
157
158 @register.filter
159 def classname(obj):
160 """
161 Returns the name of the object's class
162 A use-case is: {% if object|classname == "Notation" %}
163
164 Used in:
165 main_app/templates/content_overview.html
166 """
167 return obj.__class__.__name__
168
169
170 @register.filter
171 def admin_url_name(class_name, action):
172 """
173 Accepts the name of a class in "main_app", and an action (either "change" or "delete") as arguments.
174 Returns the name of the URL for changing/deleting an object in the admin interface.
175
176 Used in:
177 main_app/templates/content_overview.html
178 """
179 class_name = class_name.lower()
180 action = action.lower()
181
182 return f"admin:main_app_{class_name}_{action}"
183
184
185 @register.filter(name="has_group")
186 def has_group(user, group_name):
187 """
188 Used in:
189 templates/base.html
190 """
191 return user.groups.filter(name=group_name).exists()
192
193
194 @register.simple_tag(takes_context=True)
195 def get_user_source_pagination(context):
196 user_created_sources = (
197 Source.objects.filter(created_by=context["user"])
198 .order_by("-date_created")
199 .distinct()
200 )
201 paginator = Paginator(user_created_sources, 10)
202 page_number = context["request"].GET.get("page")
203 page_obj = paginator.get_page(page_number)
204 return page_obj
205
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/django/cantusdb_project/main_app/templatetags/helper_tags.py b/django/cantusdb_project/main_app/templatetags/helper_tags.py
--- a/django/cantusdb_project/main_app/templatetags/helper_tags.py
+++ b/django/cantusdb_project/main_app/templatetags/helper_tags.py
@@ -73,7 +73,10 @@
link_with_siglum = make_source_detail_link_with_siglum(source)
link_with_title = make_source_detail_link_with_title(source)
add_new_chants_link = make_add_new_chants_link(source)
- edit_chants_link = make_edit_chants_link(source)
+ if source.chant_set.exists():
+ edit_chants_link = make_edit_chants_link(source)
+ else:
+ edit_chants_link = ""
template = """{sigl}<br>
<small>
<b>{title}</b><br>
|
{"golden_diff": "diff --git a/django/cantusdb_project/main_app/templatetags/helper_tags.py b/django/cantusdb_project/main_app/templatetags/helper_tags.py\n--- a/django/cantusdb_project/main_app/templatetags/helper_tags.py\n+++ b/django/cantusdb_project/main_app/templatetags/helper_tags.py\n@@ -73,7 +73,10 @@\n link_with_siglum = make_source_detail_link_with_siglum(source)\n link_with_title = make_source_detail_link_with_title(source)\n add_new_chants_link = make_add_new_chants_link(source)\n- edit_chants_link = make_edit_chants_link(source)\n+ if source.chant_set.exists():\n+ edit_chants_link = make_edit_chants_link(source)\n+ else:\n+ edit_chants_link = \"\"\n template = \"\"\"{sigl}<br>\n <small>\n <b>{title}</b><br>\n", "issue": "Hide \"Edit chants (Fulltext & Volpiano editor)\" link from my sources sidebar on flatpages\nTo prevent encountering a 404 error, we should hide the link from the \"My Sources\" sidebar on flatpages when the corresponding source has no chants.\n", "before_files": [{"content": "import calendar\nfrom typing import Union, Optional\nfrom django import template\nfrom main_app.models import Source\nfrom articles.models import Article\nfrom django.utils.safestring import mark_safe\nfrom django.urls import reverse\nfrom django.core.paginator import Paginator\n\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=False)\ndef recent_articles():\n \"\"\"\n Generates a html unordered list of recent articles for display on the homepage\n\n Used in:\n templates/flatpages/default.html\n \"\"\"\n articles = Article.objects.order_by(\"-date_created\")[:5]\n list_item_template = '<li style=\"padding-bottom: 0.5em;\"><a href=\"{url}\">{title}</a><br><small>{date}</small></li>'\n list_items = [\n list_item_template.format(\n url=a.get_absolute_url(),\n title=a.title,\n date=a.date_created.strftime(\"%A %B %-d, %Y\"),\n )\n for a in articles\n ]\n list_items_string = \"\".join(list_items)\n recent_articles_string = \"<ul>{lis}</ul>\".format(lis=list_items_string)\n return mark_safe(recent_articles_string)\n\n\[email protected]_tag(takes_context=False)\ndef my_sources(user):\n \"\"\"\n Generates a html unordered list of sources the currently logged-in user has access to edit, for display on the homepage\n\n Used in:\n templates/flatpages/default.html\n \"\"\"\n\n def make_source_detail_link_with_siglum(source):\n id = source.id\n siglum = source.rism_siglum\n url = reverse(\"source-detail\", args=[id])\n link = '<a href=\"{}\">{}</a>'.format(url, siglum)\n return link\n\n def make_source_detail_link_with_title(source):\n id = source.id\n title = source.title\n url = reverse(\"source-detail\", args=[id])\n link = '<a href=\"{}\">{}</a>'.format(url, title)\n return link\n\n def make_add_new_chants_link(source):\n id = source.id\n url = reverse(\"chant-create\", args=[id])\n link = '<a href=\"{}\">+ Add new chant</a>'.format(url)\n return link\n\n def make_edit_chants_link(source):\n id = source.id\n url = reverse(\"source-edit-chants\", args=[id])\n link = '<a href=\"{}\">Edit chants (Fulltext & Volpiano editor)</a>'.format(url)\n return link\n\n def make_links_for_source(source):\n link_with_siglum = make_source_detail_link_with_siglum(source)\n link_with_title = make_source_detail_link_with_title(source)\n add_new_chants_link = make_add_new_chants_link(source)\n edit_chants_link = make_edit_chants_link(source)\n template = \"\"\"{sigl}<br>\n <small>\n <b>{title}</b><br>\n {add}<br>\n {edit}<br>\n </small>\n \"\"\"\n links_string = template.format(\n 
sigl=link_with_siglum,\n title=link_with_title,\n add=add_new_chants_link,\n edit=edit_chants_link,\n )\n return links_string\n\n MAX_SOURCES_TO_DISPLAY = 6\n sources = list(user.sources_user_can_edit.all())[:MAX_SOURCES_TO_DISPLAY]\n source_links = [make_links_for_source(source) for source in sources]\n list_items = [\"<li>{}</li>\".format(link) for link in source_links]\n joined_list_items = \"\".join(list_items)\n links_ul = \"<ul>{}</ul>\".format(joined_list_items)\n return mark_safe(links_ul)\n\n\[email protected](name=\"month_to_string\")\ndef month_to_string(value: Optional[Union[str, int]]) -> Optional[Union[str, int]]:\n \"\"\"\n Converts month number to textual representation, 3 letters (Jan, Mar, etc)\n\n used in:\n main_app/templates/feast_detail.html\n main_app/templates/feast_list.html\n \"\"\"\n if type(value) == int and value in range(1, 13):\n return calendar.month_abbr[value]\n else:\n return value\n\n\[email protected]_tag(takes_context=True)\ndef url_add_get_params(context, **kwargs):\n \"\"\"\n accounts for the situations where there may be two paginations in one page\n\n Used in:\n main_app/templates/pagination.html\n main_app/templates/user_source_list.html\n \"\"\"\n query = context[\"request\"].GET.copy()\n if \"page\" in kwargs:\n query.pop(\"page\", None)\n if \"page2\" in kwargs:\n query.pop(\"page2\", None)\n query.update(kwargs)\n return query.urlencode()\n\n\[email protected]_tag(takes_context=False)\ndef source_links():\n \"\"\"\n Generates a series of html option tags linking to sources in Cantus Dabase, for display on the homepage\n\n Used in:\n templates/flatpages/default.html\n \"\"\"\n sources = (\n Source.objects.filter(published=True, segment__id=4063)\n .exclude(siglum=None)\n .values(\"siglum\", \"id\")\n .order_by(\"siglum\")\n )\n options = \"\"\n for source in sources:\n option_str = (\n f\"<option value=source/{source['id']}>{source['siglum']}</option>\\n\"\n )\n options += option_str\n\n return mark_safe(options)\n\n\[email protected]\ndef classname(obj):\n \"\"\"\n Returns the name of the object's class\n A use-case is: {% if object|classname == \"Notation\" %}\n\n Used in:\n main_app/templates/content_overview.html\n \"\"\"\n return obj.__class__.__name__\n\n\[email protected]\ndef admin_url_name(class_name, action):\n \"\"\"\n Accepts the name of a class in \"main_app\", and an action (either \"change\" or \"delete\") as arguments.\n Returns the name of the URL for changing/deleting an object in the admin interface.\n\n Used in:\n main_app/templates/content_overview.html\n \"\"\"\n class_name = class_name.lower()\n action = action.lower()\n\n return f\"admin:main_app_{class_name}_{action}\"\n\n\[email protected](name=\"has_group\")\ndef has_group(user, group_name):\n \"\"\"\n Used in:\n templates/base.html\n \"\"\"\n return user.groups.filter(name=group_name).exists()\n\n\[email protected]_tag(takes_context=True)\ndef get_user_source_pagination(context):\n user_created_sources = (\n Source.objects.filter(created_by=context[\"user\"])\n .order_by(\"-date_created\")\n .distinct()\n )\n paginator = Paginator(user_created_sources, 10)\n page_number = context[\"request\"].GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n return page_obj\n", "path": "django/cantusdb_project/main_app/templatetags/helper_tags.py"}], "after_files": [{"content": "import calendar\nfrom typing import Union, Optional\nfrom django import template\nfrom main_app.models import Source\nfrom articles.models import Article\nfrom django.utils.safestring import 
mark_safe\nfrom django.urls import reverse\nfrom django.core.paginator import Paginator\n\n\nregister = template.Library()\n\n\[email protected]_tag(takes_context=False)\ndef recent_articles():\n \"\"\"\n Generates a html unordered list of recent articles for display on the homepage\n\n Used in:\n templates/flatpages/default.html\n \"\"\"\n articles = Article.objects.order_by(\"-date_created\")[:5]\n list_item_template = '<li style=\"padding-bottom: 0.5em;\"><a href=\"{url}\">{title}</a><br><small>{date}</small></li>'\n list_items = [\n list_item_template.format(\n url=a.get_absolute_url(),\n title=a.title,\n date=a.date_created.strftime(\"%A %B %-d, %Y\"),\n )\n for a in articles\n ]\n list_items_string = \"\".join(list_items)\n recent_articles_string = \"<ul>{lis}</ul>\".format(lis=list_items_string)\n return mark_safe(recent_articles_string)\n\n\[email protected]_tag(takes_context=False)\ndef my_sources(user):\n \"\"\"\n Generates a html unordered list of sources the currently logged-in user has access to edit, for display on the homepage\n\n Used in:\n templates/flatpages/default.html\n \"\"\"\n\n def make_source_detail_link_with_siglum(source):\n id = source.id\n siglum = source.rism_siglum\n url = reverse(\"source-detail\", args=[id])\n link = '<a href=\"{}\">{}</a>'.format(url, siglum)\n return link\n\n def make_source_detail_link_with_title(source):\n id = source.id\n title = source.title\n url = reverse(\"source-detail\", args=[id])\n link = '<a href=\"{}\">{}</a>'.format(url, title)\n return link\n\n def make_add_new_chants_link(source):\n id = source.id\n url = reverse(\"chant-create\", args=[id])\n link = '<a href=\"{}\">+ Add new chant</a>'.format(url)\n return link\n\n def make_edit_chants_link(source):\n id = source.id\n url = reverse(\"source-edit-chants\", args=[id])\n link = '<a href=\"{}\">Edit chants (Fulltext & Volpiano editor)</a>'.format(url)\n return link\n\n def make_links_for_source(source):\n link_with_siglum = make_source_detail_link_with_siglum(source)\n link_with_title = make_source_detail_link_with_title(source)\n add_new_chants_link = make_add_new_chants_link(source)\n if source.chant_set.exists():\n edit_chants_link = make_edit_chants_link(source)\n else:\n edit_chants_link = \"\"\n template = \"\"\"{sigl}<br>\n <small>\n <b>{title}</b><br>\n {add}<br>\n {edit}<br>\n </small>\n \"\"\"\n links_string = template.format(\n sigl=link_with_siglum,\n title=link_with_title,\n add=add_new_chants_link,\n edit=edit_chants_link,\n )\n return links_string\n\n MAX_SOURCES_TO_DISPLAY = 6\n sources = list(user.sources_user_can_edit.all())[:MAX_SOURCES_TO_DISPLAY]\n source_links = [make_links_for_source(source) for source in sources]\n list_items = [\"<li>{}</li>\".format(link) for link in source_links]\n joined_list_items = \"\".join(list_items)\n links_ul = \"<ul>{}</ul>\".format(joined_list_items)\n return mark_safe(links_ul)\n\n\[email protected](name=\"month_to_string\")\ndef month_to_string(value: Optional[Union[str, int]]) -> Optional[Union[str, int]]:\n \"\"\"\n Converts month number to textual representation, 3 letters (Jan, Mar, etc)\n\n used in:\n main_app/templates/feast_detail.html\n main_app/templates/feast_list.html\n \"\"\"\n if type(value) == int and value in range(1, 13):\n return calendar.month_abbr[value]\n else:\n return value\n\n\[email protected]_tag(takes_context=True)\ndef url_add_get_params(context, **kwargs):\n \"\"\"\n accounts for the situations where there may be two paginations in one page\n\n Used in:\n main_app/templates/pagination.html\n 
main_app/templates/user_source_list.html\n \"\"\"\n query = context[\"request\"].GET.copy()\n if \"page\" in kwargs:\n query.pop(\"page\", None)\n if \"page2\" in kwargs:\n query.pop(\"page2\", None)\n query.update(kwargs)\n return query.urlencode()\n\n\[email protected]_tag(takes_context=False)\ndef source_links():\n \"\"\"\n Generates a series of html option tags linking to sources in Cantus Dabase, for display on the homepage\n\n Used in:\n templates/flatpages/default.html\n \"\"\"\n sources = (\n Source.objects.filter(published=True, segment__id=4063)\n .exclude(siglum=None)\n .values(\"siglum\", \"id\")\n .order_by(\"siglum\")\n )\n options = \"\"\n for source in sources:\n option_str = (\n f\"<option value=source/{source['id']}>{source['siglum']}</option>\\n\"\n )\n options += option_str\n\n return mark_safe(options)\n\n\[email protected]\ndef classname(obj):\n \"\"\"\n Returns the name of the object's class\n A use-case is: {% if object|classname == \"Notation\" %}\n\n Used in:\n main_app/templates/content_overview.html\n \"\"\"\n return obj.__class__.__name__\n\n\[email protected]\ndef admin_url_name(class_name, action):\n \"\"\"\n Accepts the name of a class in \"main_app\", and an action (either \"change\" or \"delete\") as arguments.\n Returns the name of the URL for changing/deleting an object in the admin interface.\n\n Used in:\n main_app/templates/content_overview.html\n \"\"\"\n class_name = class_name.lower()\n action = action.lower()\n\n return f\"admin:main_app_{class_name}_{action}\"\n\n\[email protected](name=\"has_group\")\ndef has_group(user, group_name):\n \"\"\"\n Used in:\n templates/base.html\n \"\"\"\n return user.groups.filter(name=group_name).exists()\n\n\[email protected]_tag(takes_context=True)\ndef get_user_source_pagination(context):\n user_created_sources = (\n Source.objects.filter(created_by=context[\"user\"])\n .order_by(\"-date_created\")\n .distinct()\n )\n paginator = Paginator(user_created_sources, 10)\n page_number = context[\"request\"].GET.get(\"page\")\n page_obj = paginator.get_page(page_number)\n return page_obj\n", "path": "django/cantusdb_project/main_app/templatetags/helper_tags.py"}]}
| 2,312 | 211 |
gh_patches_debug_37072
|
rasdani/github-patches
|
git_diff
|
mitmproxy__mitmproxy-4719
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Unfragmented WebSocket messages getting fragmented
https://github.com/mitmproxy/mitmproxy/blob/e270399a3eba7b3212bf7325beaa50e159a7ca0a/mitmproxy/proxy/layers/websocket.py#L153-L175
While handling WebSocket events, mitmproxy doesn't distinguish between `message_finished` and `frame_finished` ([Message class](https://python-hyper.org/projects/wsproto/en/stable/api.html#wsproto.events.Message)) which in my case led to continuation frames being sent while there were none in the initial WebSocket message.
This is because the wsproto API doesn't always emit complete frames (I guess this is caused by TCP fragmentation?): they can arrive chunked even though the original WebSocket message has no fragmentation, and I think even WebSocket messages that do use fragmentation with large continuation frames could be emitted as chunks themselves.
To avoid this behavior, each `frame_buf` entry must be a complete frame; here is my fix suggestion:
```python
for ws_event in src_ws.events():
if isinstance(ws_event, wsproto.events.Message):
is_text = isinstance(ws_event.data, str)
# Add the data variable to avoid multiple conditions
if is_text:
typ = Opcode.TEXT
data = ws_event.data.encode()
else:
typ = Opcode.BINARY
data = ws_event.data
# Make each frame one entry to frame_buf, append if empty to avoid IndexError
if src_ws.frame_buf:
src_ws.frame_buf[-1] += data
else:
src_ws.frame_buf.append(data)
if ws_event.message_finished:
content = b"".join(src_ws.frame_buf)
fragmentizer = Fragmentizer(src_ws.frame_buf, is_text)
src_ws.frame_buf.clear()
message = websocket.WebSocketMessage(typ, from_client, content)
self.flow.websocket.messages.append(message)
yield WebsocketMessageHook(self.flow)
if not message.dropped:
for msg in fragmentizer(message.content):
yield dst_ws.send2(msg)
# Initialize next frame entry
elif ws_event.frame_finished:
src_ws.frame_buf.append(b"")
```
It works for me but I didn't test it in an environment using WebSocket continuation frames. Also, this only works for unmodified WebSocket messages; injected or modified messages are still affected by the issue because the `Fragmentizer` class compares the lengths, so they will always fall into the else condition (unless the modified message keeps its original length):
https://github.com/mitmproxy/mitmproxy/blob/e270399a3eba7b3212bf7325beaa50e159a7ca0a/mitmproxy/proxy/layers/websocket.py#L229-L243
For my use case I didn't make a proper fix for this; I just made the first condition always true. Maybe a boolean variable could be added to the `WebSocketMessage` and `Fragmentizer` classes, or something like that?
--- END ISSUE ---
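To make the distinction described above concrete, here is a small self-contained sketch (neither mitmproxy nor wsproto code) of the buffering rule the reporter proposes: chunks of the same frame are concatenated, and a new buffer entry is only opened once `frame_finished` is seen, so an unfragmented message that wsproto happens to deliver in several chunks is still forwarded as a single frame. The chunk tuples and function name are illustrative; the upstream fix in the golden diff below applies the same idea inside `relay_messages`.

```python
# Standalone sketch of per-frame buffering driven by wsproto-style flags.
# Each incoming chunk carries (data, frame_finished, message_finished); names are illustrative.
from typing import List, Tuple


def buffer_chunks(chunks: List[Tuple[bytes, bool, bool]]) -> List[List[bytes]]:
    """Return one list of complete frames per finished message."""
    messages: List[List[bytes]] = []
    frame_buf: List[bytes] = [b""]
    for data, frame_finished, message_finished in chunks:
        frame_buf[-1] += data          # chunks of the same frame are merged
        if message_finished:
            messages.append(frame_buf)
            frame_buf = [b""]
        elif frame_finished:
            frame_buf.append(b"")      # start collecting the next frame
    return messages


# One unfragmented message delivered in two chunks is still a single frame:
assert buffer_chunks([(b"he", False, False), (b"llo", True, True)]) == [[b"hello"]]
```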
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mitmproxy/proxy/layers/websocket.py`
Content:
```
1 import time
2 from dataclasses import dataclass
3 from typing import Iterator, List
4
5 import wsproto
6 import wsproto.extensions
7 import wsproto.frame_protocol
8 import wsproto.utilities
9 from mitmproxy import connection, http, websocket
10 from mitmproxy.proxy import commands, events, layer
11 from mitmproxy.proxy.commands import StartHook
12 from mitmproxy.proxy.context import Context
13 from mitmproxy.proxy.events import MessageInjected
14 from mitmproxy.proxy.utils import expect
15 from wsproto import ConnectionState
16 from wsproto.frame_protocol import Opcode
17
18
19 @dataclass
20 class WebsocketStartHook(StartHook):
21 """
22 A WebSocket connection has commenced.
23 """
24 flow: http.HTTPFlow
25
26
27 @dataclass
28 class WebsocketMessageHook(StartHook):
29 """
30 Called when a WebSocket message is received from the client or
31 server. The most recent message will be flow.messages[-1]. The
32 message is user-modifiable. Currently there are two types of
33 messages, corresponding to the BINARY and TEXT frame types.
34 """
35 flow: http.HTTPFlow
36
37
38 @dataclass
39 class WebsocketEndHook(StartHook):
40 """
41 A WebSocket connection has ended.
42 You can check `flow.websocket.close_code` to determine why it ended.
43 """
44
45 flow: http.HTTPFlow
46
47
48 class WebSocketMessageInjected(MessageInjected[websocket.WebSocketMessage]):
49 """
50 The user has injected a custom WebSocket message.
51 """
52
53
54 class WebsocketConnection(wsproto.Connection):
55 """
56 A very thin wrapper around wsproto.Connection:
57
58 - we keep the underlying connection as an attribute for easy access.
59 - we add a framebuffer for incomplete messages
60 - we wrap .send() so that we can directly yield it.
61 """
62 conn: connection.Connection
63 frame_buf: List[bytes]
64
65 def __init__(self, *args, conn: connection.Connection, **kwargs):
66 super(WebsocketConnection, self).__init__(*args, **kwargs)
67 self.conn = conn
68 self.frame_buf = []
69
70 def send2(self, event: wsproto.events.Event) -> commands.SendData:
71 data = self.send(event)
72 return commands.SendData(self.conn, data)
73
74 def __repr__(self):
75 return f"WebsocketConnection<{self.state.name}, {self.conn}>"
76
77
78 class WebsocketLayer(layer.Layer):
79 """
80 WebSocket layer that intercepts and relays messages.
81 """
82 flow: http.HTTPFlow
83 client_ws: WebsocketConnection
84 server_ws: WebsocketConnection
85
86 def __init__(self, context: Context, flow: http.HTTPFlow):
87 super().__init__(context)
88 self.flow = flow
89 assert context.server.connected
90
91 @expect(events.Start)
92 def start(self, _) -> layer.CommandGenerator[None]:
93
94 client_extensions = []
95 server_extensions = []
96
97 # Parse extension headers. We only support deflate at the moment and ignore everything else.
98 assert self.flow.response # satisfy type checker
99 ext_header = self.flow.response.headers.get("Sec-WebSocket-Extensions", "")
100 if ext_header:
101 for ext in wsproto.utilities.split_comma_header(ext_header.encode("ascii", "replace")):
102 ext_name = ext.split(";", 1)[0].strip()
103 if ext_name == wsproto.extensions.PerMessageDeflate.name:
104 client_deflate = wsproto.extensions.PerMessageDeflate()
105 client_deflate.finalize(ext)
106 client_extensions.append(client_deflate)
107 server_deflate = wsproto.extensions.PerMessageDeflate()
108 server_deflate.finalize(ext)
109 server_extensions.append(server_deflate)
110 else:
111 yield commands.Log(f"Ignoring unknown WebSocket extension {ext_name!r}.")
112
113 self.client_ws = WebsocketConnection(wsproto.ConnectionType.SERVER, client_extensions, conn=self.context.client)
114 self.server_ws = WebsocketConnection(wsproto.ConnectionType.CLIENT, server_extensions, conn=self.context.server)
115
116 yield WebsocketStartHook(self.flow)
117
118 self._handle_event = self.relay_messages
119
120 _handle_event = start
121
122 @expect(events.DataReceived, events.ConnectionClosed, WebSocketMessageInjected)
123 def relay_messages(self, event: events.Event) -> layer.CommandGenerator[None]:
124 assert self.flow.websocket # satisfy type checker
125
126 if isinstance(event, events.ConnectionEvent):
127 from_client = event.connection == self.context.client
128 elif isinstance(event, WebSocketMessageInjected):
129 from_client = event.message.from_client
130 else:
131 raise AssertionError(f"Unexpected event: {event}")
132
133 from_str = 'client' if from_client else 'server'
134 if from_client:
135 src_ws = self.client_ws
136 dst_ws = self.server_ws
137 else:
138 src_ws = self.server_ws
139 dst_ws = self.client_ws
140
141 if isinstance(event, events.DataReceived):
142 src_ws.receive_data(event.data)
143 elif isinstance(event, events.ConnectionClosed):
144 src_ws.receive_data(None)
145 elif isinstance(event, WebSocketMessageInjected):
146 fragmentizer = Fragmentizer([], event.message.type == Opcode.TEXT)
147 src_ws._events.extend(
148 fragmentizer(event.message.content)
149 )
150 else: # pragma: no cover
151 raise AssertionError(f"Unexpected event: {event}")
152
153 for ws_event in src_ws.events():
154 if isinstance(ws_event, wsproto.events.Message):
155 is_text = isinstance(ws_event.data, str)
156 if is_text:
157 typ = Opcode.TEXT
158 src_ws.frame_buf.append(ws_event.data.encode())
159 else:
160 typ = Opcode.BINARY
161 src_ws.frame_buf.append(ws_event.data)
162
163 if ws_event.message_finished:
164 content = b"".join(src_ws.frame_buf)
165
166 fragmentizer = Fragmentizer(src_ws.frame_buf, is_text)
167 src_ws.frame_buf.clear()
168
169 message = websocket.WebSocketMessage(typ, from_client, content)
170 self.flow.websocket.messages.append(message)
171 yield WebsocketMessageHook(self.flow)
172
173 if not message.dropped:
174 for msg in fragmentizer(message.content):
175 yield dst_ws.send2(msg)
176
177 elif isinstance(ws_event, (wsproto.events.Ping, wsproto.events.Pong)):
178 yield commands.Log(
179 f"Received WebSocket {ws_event.__class__.__name__.lower()} from {from_str} "
180 f"(payload: {bytes(ws_event.payload)!r})"
181 )
182 yield dst_ws.send2(ws_event)
183 elif isinstance(ws_event, wsproto.events.CloseConnection):
184 self.flow.websocket.timestamp_end = time.time()
185 self.flow.websocket.closed_by_client = from_client
186 self.flow.websocket.close_code = ws_event.code
187 self.flow.websocket.close_reason = ws_event.reason
188
189 for ws in [self.server_ws, self.client_ws]:
190 if ws.state in {ConnectionState.OPEN, ConnectionState.REMOTE_CLOSING}:
191 # response == original event, so no need to differentiate here.
192 yield ws.send2(ws_event)
193 yield commands.CloseConnection(ws.conn)
194 yield WebsocketEndHook(self.flow)
195 self._handle_event = self.done
196 else: # pragma: no cover
197 raise AssertionError(f"Unexpected WebSocket event: {ws_event}")
198
199 @expect(events.DataReceived, events.ConnectionClosed, WebSocketMessageInjected)
200 def done(self, _) -> layer.CommandGenerator[None]:
201 yield from ()
202
203
204 class Fragmentizer:
205 """
206 Theory (RFC 6455):
207 Unless specified otherwise by an extension, frames have no semantic
208 meaning. An intermediary might coalesce and/or split frames, [...]
209
210 Practice:
211 Some WebSocket servers reject large payload sizes.
212
213 As a workaround, we either retain the original chunking or, if the payload has been modified, use ~4kB chunks.
214 """
215 # A bit less than 4kb to accommodate for headers.
216 FRAGMENT_SIZE = 4000
217
218 def __init__(self, fragments: List[bytes], is_text: bool):
219 self.fragment_lengths = [len(x) for x in fragments]
220 self.is_text = is_text
221
222 def msg(self, data: bytes, message_finished: bool):
223 if self.is_text:
224 data_str = data.decode(errors="replace")
225 return wsproto.events.TextMessage(data_str, message_finished=message_finished)
226 else:
227 return wsproto.events.BytesMessage(data, message_finished=message_finished)
228
229 def __call__(self, content: bytes) -> Iterator[wsproto.events.Message]:
230 if len(content) == sum(self.fragment_lengths):
231 # message has the same length, we can reuse the same sizes
232 offset = 0
233 for fl in self.fragment_lengths[:-1]:
234 yield self.msg(content[offset:offset + fl], False)
235 offset += fl
236 yield self.msg(content[offset:], True)
237 else:
238 offset = 0
239 total = len(content) - self.FRAGMENT_SIZE
240 while offset < total:
241 yield self.msg(content[offset:offset + self.FRAGMENT_SIZE], False)
242 offset += self.FRAGMENT_SIZE
243 yield self.msg(content[offset:], True)
244
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mitmproxy/proxy/layers/websocket.py b/mitmproxy/proxy/layers/websocket.py
--- a/mitmproxy/proxy/layers/websocket.py
+++ b/mitmproxy/proxy/layers/websocket.py
@@ -65,7 +65,7 @@
def __init__(self, *args, conn: connection.Connection, **kwargs):
super(WebsocketConnection, self).__init__(*args, **kwargs)
self.conn = conn
- self.frame_buf = []
+ self.frame_buf = [b""]
def send2(self, event: wsproto.events.Event) -> commands.SendData:
data = self.send(event)
@@ -155,10 +155,10 @@
is_text = isinstance(ws_event.data, str)
if is_text:
typ = Opcode.TEXT
- src_ws.frame_buf.append(ws_event.data.encode())
+ src_ws.frame_buf[-1] += ws_event.data.encode()
else:
typ = Opcode.BINARY
- src_ws.frame_buf.append(ws_event.data)
+ src_ws.frame_buf[-1] += ws_event.data
if ws_event.message_finished:
content = b"".join(src_ws.frame_buf)
@@ -174,6 +174,9 @@
for msg in fragmentizer(message.content):
yield dst_ws.send2(msg)
+ elif ws_event.frame_finished:
+ src_ws.frame_buf.append(b"")
+
elif isinstance(ws_event, (wsproto.events.Ping, wsproto.events.Pong)):
yield commands.Log(
f"Received WebSocket {ws_event.__class__.__name__.lower()} from {from_str} "
@@ -209,8 +212,11 @@
Practice:
Some WebSocket servers reject large payload sizes.
+ Other WebSocket servers reject CONTINUATION frames.
As a workaround, we either retain the original chunking or, if the payload has been modified, use ~4kB chunks.
+ If one deals with web servers that do not support CONTINUATION frames, addons need to monkeypatch FRAGMENT_SIZE
+ if they need to modify the message.
"""
# A bit less than 4kb to accommodate for headers.
FRAGMENT_SIZE = 4000
|
{"golden_diff": "diff --git a/mitmproxy/proxy/layers/websocket.py b/mitmproxy/proxy/layers/websocket.py\n--- a/mitmproxy/proxy/layers/websocket.py\n+++ b/mitmproxy/proxy/layers/websocket.py\n@@ -65,7 +65,7 @@\n def __init__(self, *args, conn: connection.Connection, **kwargs):\n super(WebsocketConnection, self).__init__(*args, **kwargs)\n self.conn = conn\n- self.frame_buf = []\n+ self.frame_buf = [b\"\"]\n \n def send2(self, event: wsproto.events.Event) -> commands.SendData:\n data = self.send(event)\n@@ -155,10 +155,10 @@\n is_text = isinstance(ws_event.data, str)\n if is_text:\n typ = Opcode.TEXT\n- src_ws.frame_buf.append(ws_event.data.encode())\n+ src_ws.frame_buf[-1] += ws_event.data.encode()\n else:\n typ = Opcode.BINARY\n- src_ws.frame_buf.append(ws_event.data)\n+ src_ws.frame_buf[-1] += ws_event.data\n \n if ws_event.message_finished:\n content = b\"\".join(src_ws.frame_buf)\n@@ -174,6 +174,9 @@\n for msg in fragmentizer(message.content):\n yield dst_ws.send2(msg)\n \n+ elif ws_event.frame_finished:\n+ src_ws.frame_buf.append(b\"\")\n+\n elif isinstance(ws_event, (wsproto.events.Ping, wsproto.events.Pong)):\n yield commands.Log(\n f\"Received WebSocket {ws_event.__class__.__name__.lower()} from {from_str} \"\n@@ -209,8 +212,11 @@\n \n Practice:\n Some WebSocket servers reject large payload sizes.\n+ Other WebSocket servers reject CONTINUATION frames.\n \n As a workaround, we either retain the original chunking or, if the payload has been modified, use ~4kB chunks.\n+ If one deals with web servers that do not support CONTINUATION frames, addons need to monkeypatch FRAGMENT_SIZE\n+ if they need to modify the message.\n \"\"\"\n # A bit less than 4kb to accommodate for headers.\n FRAGMENT_SIZE = 4000\n", "issue": "Unfragmented WebSocket messages getting fragmented\nhttps://github.com/mitmproxy/mitmproxy/blob/e270399a3eba7b3212bf7325beaa50e159a7ca0a/mitmproxy/proxy/layers/websocket.py#L153-L175\r\n\r\nWhile handling WebSocket events, mitmproxy doesn't distinguish between `message_finished` and `frame_finished` ([Message class](https://python-hyper.org/projects/wsproto/en/stable/api.html#wsproto.events.Message)) which in my case led to continuation frames being sent while there were none in the initial WebSocket message.\r\n\r\nThis is because the wsproto API doesn't always emit complete frames (I guess this is caused by the TCP fragmentation?), they could be chunked while the original WebSocket message has no fragmentation and I think even WebSocket messages using fragmentation with large continuation frames could be emitted as chunks themselves?\r\n\r\nTo avoid this behavior each `frame_buf` entry must be a complete frame, here is my fix suggestion:\r\n\r\n```python\r\n for ws_event in src_ws.events():\r\n if isinstance(ws_event, wsproto.events.Message):\r\n is_text = isinstance(ws_event.data, str)\r\n\r\n # Add the data variable to avoid multiple conditions\r\n if is_text:\r\n typ = Opcode.TEXT\r\n data = ws_event.data.encode()\r\n else:\r\n typ = Opcode.BINARY\r\n data = ws_event.data\r\n\r\n # Make each frame one entry to frame_buf, append if empty to avoid IndexError\r\n if src_ws.frame_buf:\r\n src_ws.frame_buf[-1] += data\r\n else:\r\n src_ws.frame_buf.append(data)\r\n\r\n if ws_event.message_finished:\r\n content = b\"\".join(src_ws.frame_buf)\r\n\r\n fragmentizer = Fragmentizer(src_ws.frame_buf, is_text)\r\n src_ws.frame_buf.clear()\r\n\r\n message = websocket.WebSocketMessage(typ, from_client, content)\r\n self.flow.websocket.messages.append(message)\r\n yield 
WebsocketMessageHook(self.flow)\r\n\r\n if not message.dropped:\r\n for msg in fragmentizer(message.content):\r\n yield dst_ws.send2(msg)\r\n\r\n # Initialize next frame entry\r\n elif ws_event.frame_finished:\r\n src_ws.frame_buf.append(b\"\")\r\n```\r\n\r\nIt works for me but I didn't test it with in an environment using WebSocket continuation frames. Also this only works for unmodified WebSocket messages, injected or modified messages are still concerned by the issue because the `Fragmentizer` class compares the lengths so they will always fall into the else condition (unless you made the modified message keep its original length):\r\n\r\nhttps://github.com/mitmproxy/mitmproxy/blob/e270399a3eba7b3212bf7325beaa50e159a7ca0a/mitmproxy/proxy/layers/websocket.py#L229-L243\r\n\r\nFor my use case I didn't make a proper fix for this, I just made the first condition always true, maybe a boolean variable can be added to the `WebSocketMessage` and `Fragmentizer` classes or something like that?\n", "before_files": [{"content": "import time\nfrom dataclasses import dataclass\nfrom typing import Iterator, List\n\nimport wsproto\nimport wsproto.extensions\nimport wsproto.frame_protocol\nimport wsproto.utilities\nfrom mitmproxy import connection, http, websocket\nfrom mitmproxy.proxy import commands, events, layer\nfrom mitmproxy.proxy.commands import StartHook\nfrom mitmproxy.proxy.context import Context\nfrom mitmproxy.proxy.events import MessageInjected\nfrom mitmproxy.proxy.utils import expect\nfrom wsproto import ConnectionState\nfrom wsproto.frame_protocol import Opcode\n\n\n@dataclass\nclass WebsocketStartHook(StartHook):\n \"\"\"\n A WebSocket connection has commenced.\n \"\"\"\n flow: http.HTTPFlow\n\n\n@dataclass\nclass WebsocketMessageHook(StartHook):\n \"\"\"\n Called when a WebSocket message is received from the client or\n server. The most recent message will be flow.messages[-1]. The\n message is user-modifiable. 
Currently there are two types of\n messages, corresponding to the BINARY and TEXT frame types.\n \"\"\"\n flow: http.HTTPFlow\n\n\n@dataclass\nclass WebsocketEndHook(StartHook):\n \"\"\"\n A WebSocket connection has ended.\n You can check `flow.websocket.close_code` to determine why it ended.\n \"\"\"\n\n flow: http.HTTPFlow\n\n\nclass WebSocketMessageInjected(MessageInjected[websocket.WebSocketMessage]):\n \"\"\"\n The user has injected a custom WebSocket message.\n \"\"\"\n\n\nclass WebsocketConnection(wsproto.Connection):\n \"\"\"\n A very thin wrapper around wsproto.Connection:\n\n - we keep the underlying connection as an attribute for easy access.\n - we add a framebuffer for incomplete messages\n - we wrap .send() so that we can directly yield it.\n \"\"\"\n conn: connection.Connection\n frame_buf: List[bytes]\n\n def __init__(self, *args, conn: connection.Connection, **kwargs):\n super(WebsocketConnection, self).__init__(*args, **kwargs)\n self.conn = conn\n self.frame_buf = []\n\n def send2(self, event: wsproto.events.Event) -> commands.SendData:\n data = self.send(event)\n return commands.SendData(self.conn, data)\n\n def __repr__(self):\n return f\"WebsocketConnection<{self.state.name}, {self.conn}>\"\n\n\nclass WebsocketLayer(layer.Layer):\n \"\"\"\n WebSocket layer that intercepts and relays messages.\n \"\"\"\n flow: http.HTTPFlow\n client_ws: WebsocketConnection\n server_ws: WebsocketConnection\n\n def __init__(self, context: Context, flow: http.HTTPFlow):\n super().__init__(context)\n self.flow = flow\n assert context.server.connected\n\n @expect(events.Start)\n def start(self, _) -> layer.CommandGenerator[None]:\n\n client_extensions = []\n server_extensions = []\n\n # Parse extension headers. We only support deflate at the moment and ignore everything else.\n assert self.flow.response # satisfy type checker\n ext_header = self.flow.response.headers.get(\"Sec-WebSocket-Extensions\", \"\")\n if ext_header:\n for ext in wsproto.utilities.split_comma_header(ext_header.encode(\"ascii\", \"replace\")):\n ext_name = ext.split(\";\", 1)[0].strip()\n if ext_name == wsproto.extensions.PerMessageDeflate.name:\n client_deflate = wsproto.extensions.PerMessageDeflate()\n client_deflate.finalize(ext)\n client_extensions.append(client_deflate)\n server_deflate = wsproto.extensions.PerMessageDeflate()\n server_deflate.finalize(ext)\n server_extensions.append(server_deflate)\n else:\n yield commands.Log(f\"Ignoring unknown WebSocket extension {ext_name!r}.\")\n\n self.client_ws = WebsocketConnection(wsproto.ConnectionType.SERVER, client_extensions, conn=self.context.client)\n self.server_ws = WebsocketConnection(wsproto.ConnectionType.CLIENT, server_extensions, conn=self.context.server)\n\n yield WebsocketStartHook(self.flow)\n\n self._handle_event = self.relay_messages\n\n _handle_event = start\n\n @expect(events.DataReceived, events.ConnectionClosed, WebSocketMessageInjected)\n def relay_messages(self, event: events.Event) -> layer.CommandGenerator[None]:\n assert self.flow.websocket # satisfy type checker\n\n if isinstance(event, events.ConnectionEvent):\n from_client = event.connection == self.context.client\n elif isinstance(event, WebSocketMessageInjected):\n from_client = event.message.from_client\n else:\n raise AssertionError(f\"Unexpected event: {event}\")\n\n from_str = 'client' if from_client else 'server'\n if from_client:\n src_ws = self.client_ws\n dst_ws = self.server_ws\n else:\n src_ws = self.server_ws\n dst_ws = self.client_ws\n\n if isinstance(event, 
events.DataReceived):\n src_ws.receive_data(event.data)\n elif isinstance(event, events.ConnectionClosed):\n src_ws.receive_data(None)\n elif isinstance(event, WebSocketMessageInjected):\n fragmentizer = Fragmentizer([], event.message.type == Opcode.TEXT)\n src_ws._events.extend(\n fragmentizer(event.message.content)\n )\n else: # pragma: no cover\n raise AssertionError(f\"Unexpected event: {event}\")\n\n for ws_event in src_ws.events():\n if isinstance(ws_event, wsproto.events.Message):\n is_text = isinstance(ws_event.data, str)\n if is_text:\n typ = Opcode.TEXT\n src_ws.frame_buf.append(ws_event.data.encode())\n else:\n typ = Opcode.BINARY\n src_ws.frame_buf.append(ws_event.data)\n\n if ws_event.message_finished:\n content = b\"\".join(src_ws.frame_buf)\n\n fragmentizer = Fragmentizer(src_ws.frame_buf, is_text)\n src_ws.frame_buf.clear()\n\n message = websocket.WebSocketMessage(typ, from_client, content)\n self.flow.websocket.messages.append(message)\n yield WebsocketMessageHook(self.flow)\n\n if not message.dropped:\n for msg in fragmentizer(message.content):\n yield dst_ws.send2(msg)\n\n elif isinstance(ws_event, (wsproto.events.Ping, wsproto.events.Pong)):\n yield commands.Log(\n f\"Received WebSocket {ws_event.__class__.__name__.lower()} from {from_str} \"\n f\"(payload: {bytes(ws_event.payload)!r})\"\n )\n yield dst_ws.send2(ws_event)\n elif isinstance(ws_event, wsproto.events.CloseConnection):\n self.flow.websocket.timestamp_end = time.time()\n self.flow.websocket.closed_by_client = from_client\n self.flow.websocket.close_code = ws_event.code\n self.flow.websocket.close_reason = ws_event.reason\n\n for ws in [self.server_ws, self.client_ws]:\n if ws.state in {ConnectionState.OPEN, ConnectionState.REMOTE_CLOSING}:\n # response == original event, so no need to differentiate here.\n yield ws.send2(ws_event)\n yield commands.CloseConnection(ws.conn)\n yield WebsocketEndHook(self.flow)\n self._handle_event = self.done\n else: # pragma: no cover\n raise AssertionError(f\"Unexpected WebSocket event: {ws_event}\")\n\n @expect(events.DataReceived, events.ConnectionClosed, WebSocketMessageInjected)\n def done(self, _) -> layer.CommandGenerator[None]:\n yield from ()\n\n\nclass Fragmentizer:\n \"\"\"\n Theory (RFC 6455):\n Unless specified otherwise by an extension, frames have no semantic\n meaning. 
An intermediary might coalesce and/or split frames, [...]\n\n Practice:\n Some WebSocket servers reject large payload sizes.\n\n As a workaround, we either retain the original chunking or, if the payload has been modified, use ~4kB chunks.\n \"\"\"\n # A bit less than 4kb to accommodate for headers.\n FRAGMENT_SIZE = 4000\n\n def __init__(self, fragments: List[bytes], is_text: bool):\n self.fragment_lengths = [len(x) for x in fragments]\n self.is_text = is_text\n\n def msg(self, data: bytes, message_finished: bool):\n if self.is_text:\n data_str = data.decode(errors=\"replace\")\n return wsproto.events.TextMessage(data_str, message_finished=message_finished)\n else:\n return wsproto.events.BytesMessage(data, message_finished=message_finished)\n\n def __call__(self, content: bytes) -> Iterator[wsproto.events.Message]:\n if len(content) == sum(self.fragment_lengths):\n # message has the same length, we can reuse the same sizes\n offset = 0\n for fl in self.fragment_lengths[:-1]:\n yield self.msg(content[offset:offset + fl], False)\n offset += fl\n yield self.msg(content[offset:], True)\n else:\n offset = 0\n total = len(content) - self.FRAGMENT_SIZE\n while offset < total:\n yield self.msg(content[offset:offset + self.FRAGMENT_SIZE], False)\n offset += self.FRAGMENT_SIZE\n yield self.msg(content[offset:], True)\n", "path": "mitmproxy/proxy/layers/websocket.py"}], "after_files": [{"content": "import time\nfrom dataclasses import dataclass\nfrom typing import Iterator, List\n\nimport wsproto\nimport wsproto.extensions\nimport wsproto.frame_protocol\nimport wsproto.utilities\nfrom mitmproxy import connection, http, websocket\nfrom mitmproxy.proxy import commands, events, layer\nfrom mitmproxy.proxy.commands import StartHook\nfrom mitmproxy.proxy.context import Context\nfrom mitmproxy.proxy.events import MessageInjected\nfrom mitmproxy.proxy.utils import expect\nfrom wsproto import ConnectionState\nfrom wsproto.frame_protocol import Opcode\n\n\n@dataclass\nclass WebsocketStartHook(StartHook):\n \"\"\"\n A WebSocket connection has commenced.\n \"\"\"\n flow: http.HTTPFlow\n\n\n@dataclass\nclass WebsocketMessageHook(StartHook):\n \"\"\"\n Called when a WebSocket message is received from the client or\n server. The most recent message will be flow.messages[-1]. The\n message is user-modifiable. 
Currently there are two types of\n messages, corresponding to the BINARY and TEXT frame types.\n \"\"\"\n flow: http.HTTPFlow\n\n\n@dataclass\nclass WebsocketEndHook(StartHook):\n \"\"\"\n A WebSocket connection has ended.\n You can check `flow.websocket.close_code` to determine why it ended.\n \"\"\"\n\n flow: http.HTTPFlow\n\n\nclass WebSocketMessageInjected(MessageInjected[websocket.WebSocketMessage]):\n \"\"\"\n The user has injected a custom WebSocket message.\n \"\"\"\n\n\nclass WebsocketConnection(wsproto.Connection):\n \"\"\"\n A very thin wrapper around wsproto.Connection:\n\n - we keep the underlying connection as an attribute for easy access.\n - we add a framebuffer for incomplete messages\n - we wrap .send() so that we can directly yield it.\n \"\"\"\n conn: connection.Connection\n frame_buf: List[bytes]\n\n def __init__(self, *args, conn: connection.Connection, **kwargs):\n super(WebsocketConnection, self).__init__(*args, **kwargs)\n self.conn = conn\n self.frame_buf = [b\"\"]\n\n def send2(self, event: wsproto.events.Event) -> commands.SendData:\n data = self.send(event)\n return commands.SendData(self.conn, data)\n\n def __repr__(self):\n return f\"WebsocketConnection<{self.state.name}, {self.conn}>\"\n\n\nclass WebsocketLayer(layer.Layer):\n \"\"\"\n WebSocket layer that intercepts and relays messages.\n \"\"\"\n flow: http.HTTPFlow\n client_ws: WebsocketConnection\n server_ws: WebsocketConnection\n\n def __init__(self, context: Context, flow: http.HTTPFlow):\n super().__init__(context)\n self.flow = flow\n assert context.server.connected\n\n @expect(events.Start)\n def start(self, _) -> layer.CommandGenerator[None]:\n\n client_extensions = []\n server_extensions = []\n\n # Parse extension headers. We only support deflate at the moment and ignore everything else.\n assert self.flow.response # satisfy type checker\n ext_header = self.flow.response.headers.get(\"Sec-WebSocket-Extensions\", \"\")\n if ext_header:\n for ext in wsproto.utilities.split_comma_header(ext_header.encode(\"ascii\", \"replace\")):\n ext_name = ext.split(\";\", 1)[0].strip()\n if ext_name == wsproto.extensions.PerMessageDeflate.name:\n client_deflate = wsproto.extensions.PerMessageDeflate()\n client_deflate.finalize(ext)\n client_extensions.append(client_deflate)\n server_deflate = wsproto.extensions.PerMessageDeflate()\n server_deflate.finalize(ext)\n server_extensions.append(server_deflate)\n else:\n yield commands.Log(f\"Ignoring unknown WebSocket extension {ext_name!r}.\")\n\n self.client_ws = WebsocketConnection(wsproto.ConnectionType.SERVER, client_extensions, conn=self.context.client)\n self.server_ws = WebsocketConnection(wsproto.ConnectionType.CLIENT, server_extensions, conn=self.context.server)\n\n yield WebsocketStartHook(self.flow)\n\n self._handle_event = self.relay_messages\n\n _handle_event = start\n\n @expect(events.DataReceived, events.ConnectionClosed, WebSocketMessageInjected)\n def relay_messages(self, event: events.Event) -> layer.CommandGenerator[None]:\n assert self.flow.websocket # satisfy type checker\n\n if isinstance(event, events.ConnectionEvent):\n from_client = event.connection == self.context.client\n elif isinstance(event, WebSocketMessageInjected):\n from_client = event.message.from_client\n else:\n raise AssertionError(f\"Unexpected event: {event}\")\n\n from_str = 'client' if from_client else 'server'\n if from_client:\n src_ws = self.client_ws\n dst_ws = self.server_ws\n else:\n src_ws = self.server_ws\n dst_ws = self.client_ws\n\n if isinstance(event, 
events.DataReceived):\n src_ws.receive_data(event.data)\n elif isinstance(event, events.ConnectionClosed):\n src_ws.receive_data(None)\n elif isinstance(event, WebSocketMessageInjected):\n fragmentizer = Fragmentizer([], event.message.type == Opcode.TEXT)\n src_ws._events.extend(\n fragmentizer(event.message.content)\n )\n else: # pragma: no cover\n raise AssertionError(f\"Unexpected event: {event}\")\n\n for ws_event in src_ws.events():\n if isinstance(ws_event, wsproto.events.Message):\n is_text = isinstance(ws_event.data, str)\n if is_text:\n typ = Opcode.TEXT\n src_ws.frame_buf[-1] += ws_event.data.encode()\n else:\n typ = Opcode.BINARY\n src_ws.frame_buf[-1] += ws_event.data\n\n if ws_event.message_finished:\n content = b\"\".join(src_ws.frame_buf)\n\n fragmentizer = Fragmentizer(src_ws.frame_buf, is_text)\n src_ws.frame_buf.clear()\n\n message = websocket.WebSocketMessage(typ, from_client, content)\n self.flow.websocket.messages.append(message)\n yield WebsocketMessageHook(self.flow)\n\n if not message.dropped:\n for msg in fragmentizer(message.content):\n yield dst_ws.send2(msg)\n\n elif ws_event.frame_finished:\n src_ws.frame_buf.append(b\"\")\n\n elif isinstance(ws_event, (wsproto.events.Ping, wsproto.events.Pong)):\n yield commands.Log(\n f\"Received WebSocket {ws_event.__class__.__name__.lower()} from {from_str} \"\n f\"(payload: {bytes(ws_event.payload)!r})\"\n )\n yield dst_ws.send2(ws_event)\n elif isinstance(ws_event, wsproto.events.CloseConnection):\n self.flow.websocket.timestamp_end = time.time()\n self.flow.websocket.closed_by_client = from_client\n self.flow.websocket.close_code = ws_event.code\n self.flow.websocket.close_reason = ws_event.reason\n\n for ws in [self.server_ws, self.client_ws]:\n if ws.state in {ConnectionState.OPEN, ConnectionState.REMOTE_CLOSING}:\n # response == original event, so no need to differentiate here.\n yield ws.send2(ws_event)\n yield commands.CloseConnection(ws.conn)\n yield WebsocketEndHook(self.flow)\n self._handle_event = self.done\n else: # pragma: no cover\n raise AssertionError(f\"Unexpected WebSocket event: {ws_event}\")\n\n @expect(events.DataReceived, events.ConnectionClosed, WebSocketMessageInjected)\n def done(self, _) -> layer.CommandGenerator[None]:\n yield from ()\n\n\nclass Fragmentizer:\n \"\"\"\n Theory (RFC 6455):\n Unless specified otherwise by an extension, frames have no semantic\n meaning. 
An intermediary might coalesce and/or split frames, [...]\n\n Practice:\n Some WebSocket servers reject large payload sizes.\n Other WebSocket servers reject CONTINUATION frames.\n\n As a workaround, we either retain the original chunking or, if the payload has been modified, use ~4kB chunks.\n If one deals with web servers that do not support CONTINUATION frames, addons need to monkeypatch FRAGMENT_SIZE\n if they need to modify the message.\n \"\"\"\n # A bit less than 4kb to accommodate for headers.\n FRAGMENT_SIZE = 4000\n\n def __init__(self, fragments: List[bytes], is_text: bool):\n self.fragment_lengths = [len(x) for x in fragments]\n self.is_text = is_text\n\n def msg(self, data: bytes, message_finished: bool):\n if self.is_text:\n data_str = data.decode(errors=\"replace\")\n return wsproto.events.TextMessage(data_str, message_finished=message_finished)\n else:\n return wsproto.events.BytesMessage(data, message_finished=message_finished)\n\n def __call__(self, content: bytes) -> Iterator[wsproto.events.Message]:\n if len(content) == sum(self.fragment_lengths):\n # message has the same length, we can reuse the same sizes\n offset = 0\n for fl in self.fragment_lengths[:-1]:\n yield self.msg(content[offset:offset + fl], False)\n offset += fl\n yield self.msg(content[offset:], True)\n else:\n offset = 0\n total = len(content) - self.FRAGMENT_SIZE\n while offset < total:\n yield self.msg(content[offset:offset + self.FRAGMENT_SIZE], False)\n offset += self.FRAGMENT_SIZE\n yield self.msg(content[offset:], True)\n", "path": "mitmproxy/proxy/layers/websocket.py"}]}
| 3,444 | 490 |
gh_patches_debug_13914
|
rasdani/github-patches
|
git_diff
|
liqd__a4-meinberlin-2083
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
store window: district tile announces more results than there are if I click on them
The store window tile shows ALL projects of the district; if I click through to the project overview, the default filter removes all old projects and all plans without participation ("Beteiligung").
Can we count only running projects with participation?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `meinberlin/apps/cms/models/storefronts.py`
Content:
```
1 import random
2
3 from django.db import models
4 from django.utils.functional import cached_property
5 from modelcluster.fields import ParentalKey
6 from modelcluster.models import ClusterableModel
7 from wagtail.admin import edit_handlers
8 from wagtail.admin.edit_handlers import FieldPanel
9 from wagtail.images.edit_handlers import ImageChooserPanel
10 from wagtail.snippets.models import register_snippet
11
12 from adhocracy4.comments.models import Comment
13 from adhocracy4.modules.models import Item
14 from adhocracy4.projects.models import Project
15 from meinberlin.apps.projects import get_project_type
16
17
18 class StorefrontItem(models.Model):
19 district = models.ForeignKey(
20 'a4administrative_districts.AdministrativeDistrict',
21 related_name='+',
22 null=True,
23 blank=True
24 )
25 project = models.ForeignKey(
26 'a4projects.Project',
27 related_name='+',
28 null=True,
29 blank=True
30 )
31 quote = models.TextField(
32 blank=True,
33 max_length=150
34 )
35
36 def __str__(self):
37 return str(self.pk)
38
39 @cached_property
40 def item_type(self):
41 if get_project_type(self.project) in ('external', 'bplan'):
42 return 'external'
43 return 'project'
44
45 @cached_property
46 def project_url(self):
47 if self.item_type == 'external':
48 return self.project.externalproject.url
49 return self.project.get_absolute_url()
50
51 @cached_property
52 def district_project_count(self):
53 return Project.objects\
54 .filter(administrative_district=self.district,
55 is_draft=False,
56 is_public=True,
57 is_archived=False
58 ).count()
59
60 panels = [
61 FieldPanel('district'),
62 FieldPanel('project'),
63 FieldPanel('quote'),
64 ]
65
66
67 @register_snippet
68 class Storefront(ClusterableModel):
69 title = models.CharField(max_length=255, null=False, blank=False)
70 image = models.ForeignKey(
71 'meinberlin_cms.CustomImage',
72 null=True,
73 blank=True,
74 on_delete=models.SET_NULL,
75 related_name='+'
76 )
77 teaser = models.CharField(max_length=100)
78
79 def __str__(self):
80 return self.title
81
82 @cached_property
83 def num_entries(self):
84 num_comments = Comment.objects.all().count()
85 num_items = Item.objects.all().count()
86 return num_comments + num_items
87
88 @cached_property
89 def num_projects(self):
90 projects = Project.objects.all()\
91 .filter(is_draft=False, is_archived=False, is_public=True)
92 active_project_count = 0
93 for project in projects:
94 if project.active_phase or project.future_phases:
95 active_project_count += 1
96 return active_project_count
97
98 @cached_property
99 def random_items(self):
100 items = self.items.all()
101 if items.count() > 3:
102 items_list = items.values_list('id', flat=True)
103 random_items = random.sample(list(items_list), 3)
104 return StorefrontItem.objects.filter(id__in=random_items)
105 else:
106 return items
107
108 title_panel = [
109 edit_handlers.FieldPanel('title')
110 ]
111
112 image_tile_panel = [
113 ImageChooserPanel('image'),
114 edit_handlers.FieldPanel('teaser')
115 ]
116
117 project_tiles_panel = [
118 edit_handlers.InlinePanel('items', min_num=3)
119 ]
120
121 edit_handler = edit_handlers.TabbedInterface([
122 edit_handlers.ObjectList(title_panel, heading='Title'),
123 edit_handlers.ObjectList(image_tile_panel, heading='Image Tile'),
124 edit_handlers.ObjectList(project_tiles_panel, heading='Project Tiles')
125 ])
126
127
128 class StorefrontCollection(StorefrontItem):
129 parent = ParentalKey('meinberlin_cms.Storefront', related_name='items')
130
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/meinberlin/apps/cms/models/storefronts.py b/meinberlin/apps/cms/models/storefronts.py
--- a/meinberlin/apps/cms/models/storefronts.py
+++ b/meinberlin/apps/cms/models/storefronts.py
@@ -50,12 +50,17 @@
@cached_property
def district_project_count(self):
- return Project.objects\
+ projects = Project.objects\
.filter(administrative_district=self.district,
is_draft=False,
is_public=True,
is_archived=False
- ).count()
+ )
+ active_project_count = 0
+ for project in projects:
+ if project.active_phase or project.future_phases:
+ active_project_count += 1
+ return active_project_count
panels = [
FieldPanel('district'),
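
The patch above reuses the same active-project test that `Storefront.num_projects` already applies. As a side note, that shared loop could be factored into a small helper; the sketch below is illustrative only (the helper name `count_active_projects` is hypothetical and not part of the repository), and it assumes the adhocracy4 `Project` model exposes the `active_phase` and `future_phases` properties used in the file above.

```python
# Hypothetical helper mirroring the counting logic used by both
# Storefront.num_projects and the patched district_project_count.
def count_active_projects(projects):
    """Count projects that are currently running or have upcoming phases."""
    active_project_count = 0
    for project in projects:
        if project.active_phase or project.future_phases:
            active_project_count += 1
    return active_project_count

# Sketch of use inside the model property:
#   return count_active_projects(
#       Project.objects.filter(administrative_district=self.district,
#                              is_draft=False, is_public=True, is_archived=False))
```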
|
{"golden_diff": "diff --git a/meinberlin/apps/cms/models/storefronts.py b/meinberlin/apps/cms/models/storefronts.py\n--- a/meinberlin/apps/cms/models/storefronts.py\n+++ b/meinberlin/apps/cms/models/storefronts.py\n@@ -50,12 +50,17 @@\n \n @cached_property\n def district_project_count(self):\n- return Project.objects\\\n+ projects = Project.objects\\\n .filter(administrative_district=self.district,\n is_draft=False,\n is_public=True,\n is_archived=False\n- ).count()\n+ )\n+ active_project_count = 0\n+ for project in projects:\n+ if project.active_phase or project.future_phases:\n+ active_project_count += 1\n+ return active_project_count\n \n panels = [\n FieldPanel('district'),\n", "issue": "store window: district tile announces more results than there are if I click on them\nstore window tile shows ALL project of district, if I click and get to project overview, the default filter takes out all old projects and plans without beteiligung.\r\n\r\ncan we only count running projects with participation?\n", "before_files": [{"content": "import random\n\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.admin import edit_handlers\nfrom wagtail.admin.edit_handlers import FieldPanel\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.snippets.models import register_snippet\n\nfrom adhocracy4.comments.models import Comment\nfrom adhocracy4.modules.models import Item\nfrom adhocracy4.projects.models import Project\nfrom meinberlin.apps.projects import get_project_type\n\n\nclass StorefrontItem(models.Model):\n district = models.ForeignKey(\n 'a4administrative_districts.AdministrativeDistrict',\n related_name='+',\n null=True,\n blank=True\n )\n project = models.ForeignKey(\n 'a4projects.Project',\n related_name='+',\n null=True,\n blank=True\n )\n quote = models.TextField(\n blank=True,\n max_length=150\n )\n\n def __str__(self):\n return str(self.pk)\n\n @cached_property\n def item_type(self):\n if get_project_type(self.project) in ('external', 'bplan'):\n return 'external'\n return 'project'\n\n @cached_property\n def project_url(self):\n if self.item_type == 'external':\n return self.project.externalproject.url\n return self.project.get_absolute_url()\n\n @cached_property\n def district_project_count(self):\n return Project.objects\\\n .filter(administrative_district=self.district,\n is_draft=False,\n is_public=True,\n is_archived=False\n ).count()\n\n panels = [\n FieldPanel('district'),\n FieldPanel('project'),\n FieldPanel('quote'),\n ]\n\n\n@register_snippet\nclass Storefront(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n image = models.ForeignKey(\n 'meinberlin_cms.CustomImage',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n teaser = models.CharField(max_length=100)\n\n def __str__(self):\n return self.title\n\n @cached_property\n def num_entries(self):\n num_comments = Comment.objects.all().count()\n num_items = Item.objects.all().count()\n return num_comments + num_items\n\n @cached_property\n def num_projects(self):\n projects = Project.objects.all()\\\n .filter(is_draft=False, is_archived=False, is_public=True)\n active_project_count = 0\n for project in projects:\n if project.active_phase or project.future_phases:\n active_project_count += 1\n return active_project_count\n\n @cached_property\n def random_items(self):\n items = self.items.all()\n if 
items.count() > 3:\n items_list = items.values_list('id', flat=True)\n random_items = random.sample(list(items_list), 3)\n return StorefrontItem.objects.filter(id__in=random_items)\n else:\n return items\n\n title_panel = [\n edit_handlers.FieldPanel('title')\n ]\n\n image_tile_panel = [\n ImageChooserPanel('image'),\n edit_handlers.FieldPanel('teaser')\n ]\n\n project_tiles_panel = [\n edit_handlers.InlinePanel('items', min_num=3)\n ]\n\n edit_handler = edit_handlers.TabbedInterface([\n edit_handlers.ObjectList(title_panel, heading='Title'),\n edit_handlers.ObjectList(image_tile_panel, heading='Image Tile'),\n edit_handlers.ObjectList(project_tiles_panel, heading='Project Tiles')\n ])\n\n\nclass StorefrontCollection(StorefrontItem):\n parent = ParentalKey('meinberlin_cms.Storefront', related_name='items')\n", "path": "meinberlin/apps/cms/models/storefronts.py"}], "after_files": [{"content": "import random\n\nfrom django.db import models\nfrom django.utils.functional import cached_property\nfrom modelcluster.fields import ParentalKey\nfrom modelcluster.models import ClusterableModel\nfrom wagtail.admin import edit_handlers\nfrom wagtail.admin.edit_handlers import FieldPanel\nfrom wagtail.images.edit_handlers import ImageChooserPanel\nfrom wagtail.snippets.models import register_snippet\n\nfrom adhocracy4.comments.models import Comment\nfrom adhocracy4.modules.models import Item\nfrom adhocracy4.projects.models import Project\nfrom meinberlin.apps.projects import get_project_type\n\n\nclass StorefrontItem(models.Model):\n district = models.ForeignKey(\n 'a4administrative_districts.AdministrativeDistrict',\n related_name='+',\n null=True,\n blank=True\n )\n project = models.ForeignKey(\n 'a4projects.Project',\n related_name='+',\n null=True,\n blank=True\n )\n quote = models.TextField(\n blank=True,\n max_length=150\n )\n\n def __str__(self):\n return str(self.pk)\n\n @cached_property\n def item_type(self):\n if get_project_type(self.project) in ('external', 'bplan'):\n return 'external'\n return 'project'\n\n @cached_property\n def project_url(self):\n if self.item_type == 'external':\n return self.project.externalproject.url\n return self.project.get_absolute_url()\n\n @cached_property\n def district_project_count(self):\n projects = Project.objects\\\n .filter(administrative_district=self.district,\n is_draft=False,\n is_public=True,\n is_archived=False\n )\n active_project_count = 0\n for project in projects:\n if project.active_phase or project.future_phases:\n active_project_count += 1\n return active_project_count\n\n panels = [\n FieldPanel('district'),\n FieldPanel('project'),\n FieldPanel('quote'),\n ]\n\n\n@register_snippet\nclass Storefront(ClusterableModel):\n title = models.CharField(max_length=255, null=False, blank=False)\n image = models.ForeignKey(\n 'meinberlin_cms.CustomImage',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n related_name='+'\n )\n teaser = models.CharField(max_length=100)\n\n def __str__(self):\n return self.title\n\n @cached_property\n def num_entries(self):\n num_comments = Comment.objects.all().count()\n num_items = Item.objects.all().count()\n return num_comments + num_items\n\n @cached_property\n def num_projects(self):\n projects = Project.objects.all()\\\n .filter(is_draft=False, is_archived=False, is_public=True)\n active_project_count = 0\n for project in projects:\n if project.active_phase or project.future_phases:\n active_project_count += 1\n return active_project_count\n\n @cached_property\n def random_items(self):\n items = 
self.items.all()\n if items.count() > 3:\n items_list = items.values_list('id', flat=True)\n random_items = random.sample(list(items_list), 3)\n return StorefrontItem.objects.filter(id__in=random_items)\n else:\n return items\n\n title_panel = [\n edit_handlers.FieldPanel('title')\n ]\n\n image_tile_panel = [\n ImageChooserPanel('image'),\n edit_handlers.FieldPanel('teaser')\n ]\n\n project_tiles_panel = [\n edit_handlers.InlinePanel('items', min_num=3)\n ]\n\n edit_handler = edit_handlers.TabbedInterface([\n edit_handlers.ObjectList(title_panel, heading='Title'),\n edit_handlers.ObjectList(image_tile_panel, heading='Image Tile'),\n edit_handlers.ObjectList(project_tiles_panel, heading='Project Tiles')\n ])\n\n\nclass StorefrontCollection(StorefrontItem):\n parent = ParentalKey('meinberlin_cms.Storefront', related_name='items')\n", "path": "meinberlin/apps/cms/models/storefronts.py"}]}
| 1,413 | 185 |
gh_patches_debug_40807
|
rasdani/github-patches
|
git_diff
|
microsoft__ptvsd-909
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Changes in CLI arguments between versions
## Environment data
- PTVSD version: 4.1.3
- Using VS Code or Visual Studio: N/A
## Actual behavior
* CLI args for starting apps for remote debugging have changed.
* The command now requires `--server-host`; this wasn't the case in the past.
* This is a breaking change.
## Expected behavior
* We need to revert if possible, as existing users will not be able to use PTVSD for remote debugging scenarios.
Reported here https://github.com/Microsoft/vscode-python/issues/2833#issuecomment-428422616
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ptvsd/__main__.py`
Content:
```
1 # Copyright (c) Microsoft Corporation. All rights reserved.
2 # Licensed under the MIT License. See LICENSE in the project root
3 # for license information.
4
5 import argparse
6 import os.path
7 import sys
8
9 from ptvsd._attach import attach_main
10 from ptvsd._local import debug_main, run_main
11 from ptvsd.socket import Address
12 from ptvsd.version import __version__, __author__ # noqa
13
14
15 ##################################
16 # the script
17
18 """
19 For the PyDevd CLI handling see:
20
21 https://github.com/fabioz/PyDev.Debugger/blob/master/_pydevd_bundle/pydevd_command_line_handling.py
22 https://github.com/fabioz/PyDev.Debugger/blob/master/pydevd.py#L1450 (main func)
23 """ # noqa
24
25 PYDEVD_OPTS = {
26 '--file',
27 '--client',
28 #'--port',
29 '--vm_type',
30 }
31
32 PYDEVD_FLAGS = {
33 '--DEBUG',
34 '--DEBUG_RECORD_SOCKET_READS',
35 '--cmd-line',
36 '--module',
37 '--multiproc',
38 '--multiprocess',
39 '--print-in-debugger-startup',
40 '--save-signatures',
41 '--save-threading',
42 '--save-asyncio',
43 '--server',
44 '--qt-support=auto',
45 }
46
47 USAGE = """
48 {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT -m MODULE [arg ...]
49 {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT FILENAME [arg ...]
50 {0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID
51 """ # noqa
52
53
54 def parse_args(argv=None):
55 """Return the parsed args to use in main()."""
56 if argv is None:
57 argv = sys.argv
58 prog = argv[0]
59 if prog == __file__:
60 prog = '{} -m ptvsd'.format(os.path.basename(sys.executable))
61 else:
62 prog = argv[0]
63 argv = argv[1:]
64
65 supported, pydevd, script = _group_args(argv)
66 args = _parse_args(prog, supported)
67 # '--' is used in _run_args to extract pydevd specific args
68 extra = pydevd + ['--']
69 if script:
70 extra += script
71 return args, extra
72
73
74 def _group_args(argv):
75 supported = []
76 pydevd = []
77 script = []
78
79 try:
80 pos = argv.index('--')
81 except ValueError:
82 script = []
83 else:
84 script = argv[pos + 1:]
85 argv = argv[:pos]
86
87 for arg in argv:
88 if arg == '-h' or arg == '--help':
89 return argv, [], script
90
91 gottarget = False
92 skip = 0
93 for i in range(len(argv)):
94 if skip:
95 skip -= 1
96 continue
97
98 arg = argv[i]
99 try:
100 nextarg = argv[i + 1]
101 except IndexError:
102 nextarg = None
103
104 # TODO: Deprecate the PyDevd arg support.
105 # PyDevd support
106 if gottarget:
107 script = argv[i:] + script
108 break
109 if arg == '--client':
110 arg = '--host'
111 elif arg == '--file':
112 if nextarg is None: # The filename is missing...
113 pydevd.append(arg)
114 continue # This will get handled later.
115 if nextarg.endswith(':') and '--module' in pydevd:
116 pydevd.remove('--module')
117 arg = '-m'
118 argv[i + 1] = nextarg = nextarg[:-1]
119 else:
120 arg = nextarg
121 skip += 1
122
123 if arg in PYDEVD_OPTS:
124 pydevd.append(arg)
125 if nextarg is not None:
126 pydevd.append(nextarg)
127 skip += 1
128 elif arg in PYDEVD_FLAGS:
129 pydevd.append(arg)
130 elif arg == '--nodebug':
131 supported.append(arg)
132
133 # ptvsd support
134 elif arg in ('--host', '--server-host', '--port', '--pid', '-m'):
135 if arg == '-m' or arg == '--pid':
136 gottarget = True
137 supported.append(arg)
138 if nextarg is not None:
139 supported.append(nextarg)
140 skip += 1
141 elif arg in ('--single-session', '--wait'):
142 supported.append(arg)
143 elif not arg.startswith('-'):
144 supported.append(arg)
145 gottarget = True
146
147 # unsupported arg
148 else:
149 supported.append(arg)
150 break
151
152 return supported, pydevd, script
153
154
155 def _parse_args(prog, argv):
156 parser = argparse.ArgumentParser(
157 prog=prog,
158 usage=USAGE.format(prog),
159 )
160
161 parser.add_argument('--nodebug', action='store_true')
162
163 host = parser.add_mutually_exclusive_group()
164 host.add_argument('--host')
165 host.add_argument('--server-host')
166 parser.add_argument('--port', type=int, required=True)
167
168 target = parser.add_mutually_exclusive_group(required=True)
169 target.add_argument('-m', dest='module')
170 target.add_argument('--pid', type=int)
171 target.add_argument('filename', nargs='?')
172
173 parser.add_argument('--single-session', action='store_true')
174 parser.add_argument('--wait', action='store_true')
175
176 parser.add_argument('-V', '--version', action='version')
177 parser.version = __version__
178
179 args = parser.parse_args(argv)
180 ns = vars(args)
181
182 serverhost = ns.pop('server_host', None)
183 clienthost = ns.pop('host', None)
184 if serverhost:
185 args.address = Address.as_server(serverhost, ns.pop('port'))
186 elif not clienthost:
187 if args.nodebug:
188 args.address = Address.as_client(clienthost, ns.pop('port'))
189 else:
190 args.address = Address.as_server(clienthost, ns.pop('port'))
191 else:
192 args.address = Address.as_client(clienthost, ns.pop('port'))
193
194 pid = ns.pop('pid')
195 module = ns.pop('module')
196 filename = ns.pop('filename')
197 if pid is not None:
198 args.name = pid
199 args.kind = 'pid'
200 elif module is not None:
201 args.name = module
202 args.kind = 'module'
203 else:
204 args.name = filename
205 args.kind = 'script'
206
207 return args
208
209
210 def handle_args(addr, name, kind, extra=(), nodebug=False, **kwargs):
211 if kind == 'pid':
212 attach_main(addr, name, *extra, **kwargs)
213 elif nodebug:
214 run_main(addr, name, kind, *extra, **kwargs)
215 else:
216 debug_main(addr, name, kind, *extra, **kwargs)
217
218
219 def main(argv=None):
220 args, extra = parse_args(argv)
221 handle_args(args.address, args.name, args.kind, extra,
222 nodebug=args.nodebug, singlesession=args.single_session,
223 wait=args.wait)
224
225
226 if __name__ == '__main__':
227 main()
228
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ptvsd/__main__.py b/ptvsd/__main__.py
--- a/ptvsd/__main__.py
+++ b/ptvsd/__main__.py
@@ -24,8 +24,6 @@
PYDEVD_OPTS = {
'--file',
- '--client',
- #'--port',
'--vm_type',
}
@@ -45,8 +43,8 @@
}
USAGE = """
- {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT -m MODULE [arg ...]
- {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT FILENAME [arg ...]
+ {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT -m MODULE [arg ...]
+ {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT FILENAME [arg ...]
{0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID
""" # noqa
@@ -106,9 +104,7 @@
if gottarget:
script = argv[i:] + script
break
- if arg == '--client':
- arg = '--host'
- elif arg == '--file':
+ if arg == '--file':
if nextarg is None: # The filename is missing...
pydevd.append(arg)
continue # This will get handled later.
@@ -131,14 +127,14 @@
supported.append(arg)
# ptvsd support
- elif arg in ('--host', '--server-host', '--port', '--pid', '-m'):
+ elif arg in ('--host', '--port', '--pid', '-m'):
if arg == '-m' or arg == '--pid':
gottarget = True
supported.append(arg)
if nextarg is not None:
supported.append(nextarg)
skip += 1
- elif arg in ('--single-session', '--wait'):
+ elif arg in ('--single-session', '--wait', '--client'):
supported.append(arg)
elif not arg.startswith('-'):
supported.append(arg)
@@ -159,10 +155,9 @@
)
parser.add_argument('--nodebug', action='store_true')
+ parser.add_argument('--client', action='store_true')
- host = parser.add_mutually_exclusive_group()
- host.add_argument('--host')
- host.add_argument('--server-host')
+ parser.add_argument('--host')
parser.add_argument('--port', type=int, required=True)
target = parser.add_mutually_exclusive_group(required=True)
@@ -179,17 +174,10 @@
args = parser.parse_args(argv)
ns = vars(args)
- serverhost = ns.pop('server_host', None)
- clienthost = ns.pop('host', None)
- if serverhost:
- args.address = Address.as_server(serverhost, ns.pop('port'))
- elif not clienthost:
- if args.nodebug:
- args.address = Address.as_client(clienthost, ns.pop('port'))
- else:
- args.address = Address.as_server(clienthost, ns.pop('port'))
- else:
- args.address = Address.as_client(clienthost, ns.pop('port'))
+ host = ns.pop('host', None)
+ port = ns.pop('port')
+ client = ns.pop('client')
+ args.address = (Address.as_client if client else Address.as_server)(host, port) # noqa
pid = ns.pop('pid')
module = ns.pop('module')
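
For reference, a minimal sketch of how the patched parser behaves. It assumes `ptvsd` is importable in the environment and uses placeholder host/port/script values, so treat it as an illustrative sketch of the patched `parse_args`, not authoritative usage documentation.

```python
# Sketch: exercising the patched ptvsd argument parser.
from ptvsd.__main__ import parse_args

# Default (no --client): host/port are used as a server address to listen on.
args, extra = parse_args(['ptvsd', '--host', '0.0.0.0', '--port', '5678', 'myscript.py'])
print(args.address, args.name, args.kind)   # e.g. server address, 'myscript.py', 'script'

# The restored --client flag turns the same host/port into a client address,
# matching the restored usage string above.
args, extra = parse_args(['ptvsd', '--client', '--host', '127.0.0.1', '--port', '5678', 'myscript.py'])
print(args.address, args.kind)
```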
|
{"golden_diff": "diff --git a/ptvsd/__main__.py b/ptvsd/__main__.py\n--- a/ptvsd/__main__.py\n+++ b/ptvsd/__main__.py\n@@ -24,8 +24,6 @@\n \n PYDEVD_OPTS = {\n '--file',\n- '--client',\n- #'--port',\n '--vm_type',\n }\n \n@@ -45,8 +43,8 @@\n }\n \n USAGE = \"\"\"\n- {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT -m MODULE [arg ...]\n- {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT FILENAME [arg ...]\n+ {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT -m MODULE [arg ...]\n+ {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT FILENAME [arg ...]\n {0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID\n \"\"\" # noqa\n \n@@ -106,9 +104,7 @@\n if gottarget:\n script = argv[i:] + script\n break\n- if arg == '--client':\n- arg = '--host'\n- elif arg == '--file':\n+ if arg == '--file':\n if nextarg is None: # The filename is missing...\n pydevd.append(arg)\n continue # This will get handled later.\n@@ -131,14 +127,14 @@\n supported.append(arg)\n \n # ptvsd support\n- elif arg in ('--host', '--server-host', '--port', '--pid', '-m'):\n+ elif arg in ('--host', '--port', '--pid', '-m'):\n if arg == '-m' or arg == '--pid':\n gottarget = True\n supported.append(arg)\n if nextarg is not None:\n supported.append(nextarg)\n skip += 1\n- elif arg in ('--single-session', '--wait'):\n+ elif arg in ('--single-session', '--wait', '--client'):\n supported.append(arg)\n elif not arg.startswith('-'):\n supported.append(arg)\n@@ -159,10 +155,9 @@\n )\n \n parser.add_argument('--nodebug', action='store_true')\n+ parser.add_argument('--client', action='store_true')\n \n- host = parser.add_mutually_exclusive_group()\n- host.add_argument('--host')\n- host.add_argument('--server-host')\n+ parser.add_argument('--host')\n parser.add_argument('--port', type=int, required=True)\n \n target = parser.add_mutually_exclusive_group(required=True)\n@@ -179,17 +174,10 @@\n args = parser.parse_args(argv)\n ns = vars(args)\n \n- serverhost = ns.pop('server_host', None)\n- clienthost = ns.pop('host', None)\n- if serverhost:\n- args.address = Address.as_server(serverhost, ns.pop('port'))\n- elif not clienthost:\n- if args.nodebug:\n- args.address = Address.as_client(clienthost, ns.pop('port'))\n- else:\n- args.address = Address.as_server(clienthost, ns.pop('port'))\n- else:\n- args.address = Address.as_client(clienthost, ns.pop('port'))\n+ host = ns.pop('host', None)\n+ port = ns.pop('port')\n+ client = ns.pop('client')\n+ args.address = (Address.as_client if client else Address.as_server)(host, port) # noqa\n \n pid = ns.pop('pid')\n module = ns.pop('module')\n", "issue": "Changes in CLI arguments between versions\n## Environment data\r\n\r\n- PTVSD version: 4.1.3\r\n- Using VS Code or Visual Studio: N/A\r\n\r\n## Actual behavior\r\n\r\n* CLI args for starting apps for remote debugging has changed \r\n* Now the arg requires `--server-host`, this wasn't the case in the past.\r\n* This is a breaking change.\r\n\r\n## Expected behavior\r\n\r\n* We need to revert if possible as existing users will not be able to use PTSVD for remote debugging scenarios.\r\n\r\nReported here https://github.com/Microsoft/vscode-python/issues/2833#issuecomment-428422616\r\n\n", "before_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See LICENSE in the project root\n# for license information.\n\nimport argparse\nimport os.path\nimport sys\n\nfrom ptvsd._attach import attach_main\nfrom ptvsd._local import debug_main, run_main\nfrom ptvsd.socket import Address\nfrom ptvsd.version import __version__, __author__ # noqa\n\n\n##################################\n# the script\n\n\"\"\"\nFor the PyDevd CLI handling see:\n\n https://github.com/fabioz/PyDev.Debugger/blob/master/_pydevd_bundle/pydevd_command_line_handling.py\n https://github.com/fabioz/PyDev.Debugger/blob/master/pydevd.py#L1450 (main func)\n\"\"\" # noqa\n\nPYDEVD_OPTS = {\n '--file',\n '--client',\n #'--port',\n '--vm_type',\n}\n\nPYDEVD_FLAGS = {\n '--DEBUG',\n '--DEBUG_RECORD_SOCKET_READS',\n '--cmd-line',\n '--module',\n '--multiproc',\n '--multiprocess',\n '--print-in-debugger-startup',\n '--save-signatures',\n '--save-threading',\n '--save-asyncio',\n '--server',\n '--qt-support=auto',\n}\n\nUSAGE = \"\"\"\n {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT -m MODULE [arg ...]\n {0} [-h] [-V] [--nodebug] [--host HOST | --server-host HOST] --port PORT FILENAME [arg ...]\n {0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID\n\"\"\" # noqa\n\n\ndef parse_args(argv=None):\n \"\"\"Return the parsed args to use in main().\"\"\"\n if argv is None:\n argv = sys.argv\n prog = argv[0]\n if prog == __file__:\n prog = '{} -m ptvsd'.format(os.path.basename(sys.executable))\n else:\n prog = argv[0]\n argv = argv[1:]\n\n supported, pydevd, script = _group_args(argv)\n args = _parse_args(prog, supported)\n # '--' is used in _run_args to extract pydevd specific args\n extra = pydevd + ['--']\n if script:\n extra += script\n return args, extra\n\n\ndef _group_args(argv):\n supported = []\n pydevd = []\n script = []\n\n try:\n pos = argv.index('--')\n except ValueError:\n script = []\n else:\n script = argv[pos + 1:]\n argv = argv[:pos]\n\n for arg in argv:\n if arg == '-h' or arg == '--help':\n return argv, [], script\n\n gottarget = False\n skip = 0\n for i in range(len(argv)):\n if skip:\n skip -= 1\n continue\n\n arg = argv[i]\n try:\n nextarg = argv[i + 1]\n except IndexError:\n nextarg = None\n\n # TODO: Deprecate the PyDevd arg support.\n # PyDevd support\n if gottarget:\n script = argv[i:] + script\n break\n if arg == '--client':\n arg = '--host'\n elif arg == '--file':\n if nextarg is None: # The filename is missing...\n pydevd.append(arg)\n continue # This will get handled later.\n if nextarg.endswith(':') and '--module' in pydevd:\n pydevd.remove('--module')\n arg = '-m'\n argv[i + 1] = nextarg = nextarg[:-1]\n else:\n arg = nextarg\n skip += 1\n\n if arg in PYDEVD_OPTS:\n pydevd.append(arg)\n if nextarg is not None:\n pydevd.append(nextarg)\n skip += 1\n elif arg in PYDEVD_FLAGS:\n pydevd.append(arg)\n elif arg == '--nodebug':\n supported.append(arg)\n\n # ptvsd support\n elif arg in ('--host', '--server-host', '--port', '--pid', '-m'):\n if arg == '-m' or arg == '--pid':\n gottarget = True\n supported.append(arg)\n if nextarg is not None:\n supported.append(nextarg)\n skip += 1\n elif arg in ('--single-session', '--wait'):\n supported.append(arg)\n elif not arg.startswith('-'):\n supported.append(arg)\n gottarget = True\n\n # unsupported arg\n else:\n supported.append(arg)\n break\n\n return supported, pydevd, script\n\n\ndef _parse_args(prog, argv):\n parser = argparse.ArgumentParser(\n prog=prog,\n usage=USAGE.format(prog),\n )\n\n parser.add_argument('--nodebug', action='store_true')\n\n host = parser.add_mutually_exclusive_group()\n 
host.add_argument('--host')\n host.add_argument('--server-host')\n parser.add_argument('--port', type=int, required=True)\n\n target = parser.add_mutually_exclusive_group(required=True)\n target.add_argument('-m', dest='module')\n target.add_argument('--pid', type=int)\n target.add_argument('filename', nargs='?')\n\n parser.add_argument('--single-session', action='store_true')\n parser.add_argument('--wait', action='store_true')\n\n parser.add_argument('-V', '--version', action='version')\n parser.version = __version__\n\n args = parser.parse_args(argv)\n ns = vars(args)\n\n serverhost = ns.pop('server_host', None)\n clienthost = ns.pop('host', None)\n if serverhost:\n args.address = Address.as_server(serverhost, ns.pop('port'))\n elif not clienthost:\n if args.nodebug:\n args.address = Address.as_client(clienthost, ns.pop('port'))\n else:\n args.address = Address.as_server(clienthost, ns.pop('port'))\n else:\n args.address = Address.as_client(clienthost, ns.pop('port'))\n\n pid = ns.pop('pid')\n module = ns.pop('module')\n filename = ns.pop('filename')\n if pid is not None:\n args.name = pid\n args.kind = 'pid'\n elif module is not None:\n args.name = module\n args.kind = 'module'\n else:\n args.name = filename\n args.kind = 'script'\n\n return args\n\n\ndef handle_args(addr, name, kind, extra=(), nodebug=False, **kwargs):\n if kind == 'pid':\n attach_main(addr, name, *extra, **kwargs)\n elif nodebug:\n run_main(addr, name, kind, *extra, **kwargs)\n else:\n debug_main(addr, name, kind, *extra, **kwargs)\n\n\ndef main(argv=None):\n args, extra = parse_args(argv)\n handle_args(args.address, args.name, args.kind, extra,\n nodebug=args.nodebug, singlesession=args.single_session,\n wait=args.wait)\n\n\nif __name__ == '__main__':\n main()\n", "path": "ptvsd/__main__.py"}], "after_files": [{"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See LICENSE in the project root\n# for license information.\n\nimport argparse\nimport os.path\nimport sys\n\nfrom ptvsd._attach import attach_main\nfrom ptvsd._local import debug_main, run_main\nfrom ptvsd.socket import Address\nfrom ptvsd.version import __version__, __author__ # noqa\n\n\n##################################\n# the script\n\n\"\"\"\nFor the PyDevd CLI handling see:\n\n https://github.com/fabioz/PyDev.Debugger/blob/master/_pydevd_bundle/pydevd_command_line_handling.py\n https://github.com/fabioz/PyDev.Debugger/blob/master/pydevd.py#L1450 (main func)\n\"\"\" # noqa\n\nPYDEVD_OPTS = {\n '--file',\n '--vm_type',\n}\n\nPYDEVD_FLAGS = {\n '--DEBUG',\n '--DEBUG_RECORD_SOCKET_READS',\n '--cmd-line',\n '--module',\n '--multiproc',\n '--multiprocess',\n '--print-in-debugger-startup',\n '--save-signatures',\n '--save-threading',\n '--save-asyncio',\n '--server',\n '--qt-support=auto',\n}\n\nUSAGE = \"\"\"\n {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT -m MODULE [arg ...]\n {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT FILENAME [arg ...]\n {0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID\n\"\"\" # noqa\n\n\ndef parse_args(argv=None):\n \"\"\"Return the parsed args to use in main().\"\"\"\n if argv is None:\n argv = sys.argv\n prog = argv[0]\n if prog == __file__:\n prog = '{} -m ptvsd'.format(os.path.basename(sys.executable))\n else:\n prog = argv[0]\n argv = argv[1:]\n\n supported, pydevd, script = _group_args(argv)\n args = _parse_args(prog, supported)\n # '--' is used in _run_args to extract pydevd specific args\n extra = pydevd + ['--']\n if script:\n extra += script\n return args, extra\n\n\ndef _group_args(argv):\n supported = []\n pydevd = []\n script = []\n\n try:\n pos = argv.index('--')\n except ValueError:\n script = []\n else:\n script = argv[pos + 1:]\n argv = argv[:pos]\n\n for arg in argv:\n if arg == '-h' or arg == '--help':\n return argv, [], script\n\n gottarget = False\n skip = 0\n for i in range(len(argv)):\n if skip:\n skip -= 1\n continue\n\n arg = argv[i]\n try:\n nextarg = argv[i + 1]\n except IndexError:\n nextarg = None\n\n # TODO: Deprecate the PyDevd arg support.\n # PyDevd support\n if gottarget:\n script = argv[i:] + script\n break\n if arg == '--file':\n if nextarg is None: # The filename is missing...\n pydevd.append(arg)\n continue # This will get handled later.\n if nextarg.endswith(':') and '--module' in pydevd:\n pydevd.remove('--module')\n arg = '-m'\n argv[i + 1] = nextarg = nextarg[:-1]\n else:\n arg = nextarg\n skip += 1\n\n if arg in PYDEVD_OPTS:\n pydevd.append(arg)\n if nextarg is not None:\n pydevd.append(nextarg)\n skip += 1\n elif arg in PYDEVD_FLAGS:\n pydevd.append(arg)\n elif arg == '--nodebug':\n supported.append(arg)\n\n # ptvsd support\n elif arg in ('--host', '--port', '--pid', '-m'):\n if arg == '-m' or arg == '--pid':\n gottarget = True\n supported.append(arg)\n if nextarg is not None:\n supported.append(nextarg)\n skip += 1\n elif arg in ('--single-session', '--wait', '--client'):\n supported.append(arg)\n elif not arg.startswith('-'):\n supported.append(arg)\n gottarget = True\n\n # unsupported arg\n else:\n supported.append(arg)\n break\n\n return supported, pydevd, script\n\n\ndef _parse_args(prog, argv):\n parser = argparse.ArgumentParser(\n prog=prog,\n usage=USAGE.format(prog),\n )\n\n parser.add_argument('--nodebug', action='store_true')\n parser.add_argument('--client', action='store_true')\n\n parser.add_argument('--host')\n parser.add_argument('--port', type=int, 
required=True)\n\n target = parser.add_mutually_exclusive_group(required=True)\n target.add_argument('-m', dest='module')\n target.add_argument('--pid', type=int)\n target.add_argument('filename', nargs='?')\n\n parser.add_argument('--single-session', action='store_true')\n parser.add_argument('--wait', action='store_true')\n\n parser.add_argument('-V', '--version', action='version')\n parser.version = __version__\n\n args = parser.parse_args(argv)\n ns = vars(args)\n\n host = ns.pop('host', None)\n port = ns.pop('port')\n client = ns.pop('client')\n args.address = (Address.as_client if client else Address.as_server)(host, port) # noqa\n\n pid = ns.pop('pid')\n module = ns.pop('module')\n filename = ns.pop('filename')\n if pid is not None:\n args.name = pid\n args.kind = 'pid'\n elif module is not None:\n args.name = module\n args.kind = 'module'\n else:\n args.name = filename\n args.kind = 'script'\n\n return args\n\n\ndef handle_args(addr, name, kind, extra=(), nodebug=False, **kwargs):\n if kind == 'pid':\n attach_main(addr, name, *extra, **kwargs)\n elif nodebug:\n run_main(addr, name, kind, *extra, **kwargs)\n else:\n debug_main(addr, name, kind, *extra, **kwargs)\n\n\ndef main(argv=None):\n args, extra = parse_args(argv)\n handle_args(args.address, args.name, args.kind, extra,\n nodebug=args.nodebug, singlesession=args.single_session,\n wait=args.wait)\n\n\nif __name__ == '__main__':\n main()\n", "path": "ptvsd/__main__.py"}]}
| 2,534 | 837 |
gh_patches_debug_17675
|
rasdani/github-patches
|
git_diff
|
nvaccess__nvda-11453
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Say all on Desktop raises an error
### Steps to reproduce:
1. Focus the desktop.
2. Invoke caret say all.
### Actual behavior:
The following error is raised:
```
ERROR - scriptHandler.executeScript (15:54:57.769):
error executing script: <bound method GlobalCommands.script_sayAll of <globalCommands.GlobalCommands object at 0x05875770>> with gesture 'NVDA+a'
Traceback (most recent call last):
File "scriptHandler.pyc", line 190, in executeScript
File "globalCommands.pyc", line 1334, in script_sayAll
File "sayAllHandler.pyc", line 79, in readText
File "sayAllHandler.pyc", line 119, in nextLine
AttributeError: '_TextReader' object has no attribute 'reader'
ERROR - stderr (15:54:57.779):
Exception ignored in:
ERROR - stderr (15:54:57.790):
<function _TextReader.__del__ at 0x0462F390>
ERROR - stderr (15:54:57.803):
Traceback (most recent call last):
ERROR - stderr (15:54:57.815):
File "sayAllHandler.pyc", line 213, in __del__
ERROR - stderr (15:54:57.827):
File "sayAllHandler.pyc", line 206, in stop
ERROR - stderr (15:54:57.839):
AttributeError
ERROR - stderr (15:54:57.851):
:
ERROR - stderr (15:54:57.863):
'_TextReader' object has no attribute 'reader'
```
### Expected behavior:
NO error
### System configuration
#### NVDA installed/portable/running from source:
Installed
#### NVDA version:
threshold-18069
#### Windows version:
Windows 10 1903 build 18362.239
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/sayAllHandler.py`
Content:
```
1 # A part of NonVisual Desktop Access (NVDA)
2 # Copyright (C) 2006-2017 NV Access Limited
3 # This file may be used under the terms of the GNU General Public License, version 2 or later.
4 # For more details see: https://www.gnu.org/licenses/gpl-2.0.html
5
6 import weakref
7 import speech
8 import synthDriverHandler
9 from logHandler import log
10 import config
11 import controlTypes
12 import api
13 import textInfos
14 import queueHandler
15 import winKernel
16
17 CURSOR_CARET = 0
18 CURSOR_REVIEW = 1
19
20 lastSayAllMode = None
21 #: The active say all manager.
22 #: This is a weakref because the manager should be allowed to die once say all is complete.
23 _activeSayAll = lambda: None # Return None when called like a dead weakref.
24
25 def stop():
26 active = _activeSayAll()
27 if active:
28 active.stop()
29
30 def isRunning():
31 """Determine whether say all is currently running.
32 @return: C{True} if say all is currently running, C{False} if not.
33 @rtype: bool
34 """
35 return bool(_activeSayAll())
36
37 def readObjects(obj):
38 global _activeSayAll
39 reader = _ObjectsReader(obj)
40 _activeSayAll = weakref.ref(reader)
41 reader.next()
42
43 class _ObjectsReader(object):
44
45 def __init__(self, root):
46 self.walker = self.walk(root)
47 self.prevObj = None
48
49 def walk(self, obj):
50 yield obj
51 child=obj.simpleFirstChild
52 while child:
53 for descendant in self.walk(child):
54 yield descendant
55 child=child.simpleNext
56
57 def next(self):
58 if not self.walker:
59 # We were stopped.
60 return
61 if self.prevObj:
62 # We just started speaking this object, so move the navigator to it.
63 api.setNavigatorObject(self.prevObj, isFocus=lastSayAllMode==CURSOR_CARET)
64 winKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)
65 # Move onto the next object.
66 self.prevObj = obj = next(self.walker, None)
67 if not obj:
68 return
69 # Call this method again when we start speaking this object.
70 callbackCommand = speech.CallbackCommand(self.next, name="say-all:next")
71 speech.speakObject(obj, reason=controlTypes.REASON_SAYALL, _prefixSpeechCommand=callbackCommand)
72
73 def stop(self):
74 self.walker = None
75
76 def readText(cursor):
77 global lastSayAllMode, _activeSayAll
78 lastSayAllMode=cursor
79 try:
80 reader = _TextReader(cursor)
81 except NotImplementedError:
82 log.debugWarning("Unable to make reader", exc_info=True)
83 return
84 _activeSayAll = weakref.ref(reader)
85 reader.nextLine()
86
87 class _TextReader(object):
88 """Manages continuous reading of text.
89 This is intended for internal use only.
90
91 The high level flow of control is as follows:
92 1. The constructor sets things up.
93 2. L{nextLine} is called to read the first line.
94 3. When it speaks a line, L{nextLine} request that L{lineReached} be called
95 when we start speaking this line, providing the position and state at this point.
96 4. When we start speaking a line, L{lineReached} is called
97 and moves the cursor to that line.
98 5. L{lineReached} calls L{nextLine}.
99 6. If there are more lines, L{nextLine} works as per steps 3 and 4.
100 7. Otherwise, if the object doesn't support page turns, we're finished.
101 8. If the object does support page turns,
102 we request that L{turnPage} be called when speech is finished.
103 9. L{turnPage} tries to turn the page.
104 10. If there are no more pages, we're finished.
105 11. If there is another page, L{turnPage} calls L{nextLine}.
106 """
107 MAX_BUFFERED_LINES = 10
108
109 def __init__(self, cursor):
110 self.cursor = cursor
111 self.trigger = SayAllProfileTrigger()
112 self.trigger.enter()
113 # Start at the cursor.
114 if cursor == CURSOR_CARET:
115 try:
116 self.reader = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)
117 except (NotImplementedError, RuntimeError) as e:
118 raise NotImplementedError("Unable to make TextInfo: " + str(e))
119 else:
120 self.reader = api.getReviewPosition()
121 self.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)
122 self.numBufferedLines = 0
123
124 def nextLine(self):
125 if not self.reader:
126 log.debug("no self.reader")
127 # We were stopped.
128 return
129 if not self.reader.obj:
130 log.debug("no self.reader.obj")
131 # The object died, so we should too.
132 self.finish()
133 return
134 bookmark = self.reader.bookmark
135 # Expand to the current line.
136 # We use move end rather than expand
137 # because the user might start in the middle of a line
138 # and we don't want to read from the start of the line in that case.
139 # For lines after the first, it's also more efficient because
140 # we're already at the start of the line, so there's no need to search backwards.
141 delta = self.reader.move(textInfos.UNIT_READINGCHUNK, 1, endPoint="end")
142 if delta <= 0:
143 # No more text.
144 if isinstance(self.reader.obj, textInfos.DocumentWithPageTurns):
145 # Once the last line finishes reading, try turning the page.
146 cb = speech.CallbackCommand(self.turnPage, name="say-all:turnPage")
147 speech.speakWithoutPauses([cb, speech.EndUtteranceCommand()])
148 else:
149 self.finish()
150 return
151
152 # Copy the speakTextInfoState so that speak callbackCommand
153 # and its associated callback are using a copy isolated to this specific line.
154 state = self.speakTextInfoState.copy()
155 # Call lineReached when we start speaking this line.
156 # lineReached will move the cursor and trigger reading of the next line.
157
158 def _onLineReached(obj=self.reader.obj, state=state):
159 self.lineReached(obj, bookmark, state)
160
161 cb = speech.CallbackCommand(
162 _onLineReached,
163 name="say-all:lineReached"
164 )
165
166 # Generate the speech sequence for the reader textInfo
167 # and insert the lineReached callback at the very beginning of the sequence.
168 # _linePrefix on speakTextInfo cannot be used here
169 # As it would be inserted in the sequence after all initial control starts which is too late.
170 speechGen = speech.getTextInfoSpeech(
171 self.reader,
172 unit=textInfos.UNIT_READINGCHUNK,
173 reason=controlTypes.REASON_SAYALL,
174 useCache=state
175 )
176 seq = list(speech._flattenNestedSequences(speechGen))
177 seq.insert(0, cb)
178 # Speak the speech sequence.
179 spoke = speech.speakWithoutPauses(seq)
180 # Update the textInfo state ready for when speaking the next line.
181 self.speakTextInfoState = state.copy()
182
183 # Collapse to the end of this line, ready to read the next.
184 try:
185 self.reader.collapse(end=True)
186 except RuntimeError:
187 # This occurs in Microsoft Word when the range covers the end of the document.
188 # without this exception to indicate that further collapsing is not possible, say all could enter an infinite loop.
189 self.finish()
190 return
191 if not spoke:
192 # This line didn't include a natural pause, so nothing was spoken.
193 self.numBufferedLines += 1
194 if self.numBufferedLines < self.MAX_BUFFERED_LINES:
195 # Move on to the next line.
196 # We queue this to allow the user a chance to stop say all.
197 queueHandler.queueFunction(queueHandler.eventQueue, self.nextLine)
198 else:
199 # We don't want to buffer too much.
200 # Force speech. lineReached will resume things when speech catches up.
201 speech.speakWithoutPauses(None)
202 # The first buffered line has now started speaking.
203 self.numBufferedLines -= 1
204
205 def lineReached(self, obj, bookmark, state):
206 # We've just started speaking this line, so move the cursor there.
207 state.updateObj()
208 updater = obj.makeTextInfo(bookmark)
209 if self.cursor == CURSOR_CARET:
210 updater.updateCaret()
211 if self.cursor != CURSOR_CARET or config.conf["reviewCursor"]["followCaret"]:
212 api.setReviewPosition(updater, isCaret=self.cursor==CURSOR_CARET)
213 winKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)
214 if self.numBufferedLines == 0:
215 # This was the last line spoken, so move on.
216 self.nextLine()
217 else:
218 self.numBufferedLines -= 1
219
220 def turnPage(self):
221 try:
222 self.reader.obj.turnPage()
223 except RuntimeError:
224 log.debug("No more pages")
225 # No more pages.
226 self.stop()
227 return
228 self.reader = self.reader.obj.makeTextInfo(textInfos.POSITION_FIRST)
229 self.nextLine()
230
231 def finish(self):
232 # There is no more text.
233 # Call stop to clean up, but only after speech completely finishes.
234 # Otherwise, if a different synth is being used for say all,
235 # we might switch synths too early and truncate the final speech.
236 # We do this by putting a CallbackCommand at the start of a new utterance.
237 cb = speech.CallbackCommand(self.stop, name="say-all:stop")
238 speech.speakWithoutPauses([
239 speech.EndUtteranceCommand(),
240 cb,
241 speech.EndUtteranceCommand()
242 ])
243
244 def stop(self):
245 if not self.reader:
246 return
247 self.reader = None
248 self.trigger.exit()
249 self.trigger = None
250
251 def __del__(self):
252 self.stop()
253
254 class SayAllProfileTrigger(config.ProfileTrigger):
255 """A configuration profile trigger for when say all is in progress.
256 """
257 spec = "sayAll"
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/source/sayAllHandler.py b/source/sayAllHandler.py
--- a/source/sayAllHandler.py
+++ b/source/sayAllHandler.py
@@ -109,7 +109,7 @@
def __init__(self, cursor):
self.cursor = cursor
self.trigger = SayAllProfileTrigger()
- self.trigger.enter()
+ self.reader = None
# Start at the cursor.
if cursor == CURSOR_CARET:
try:
@@ -118,6 +118,8 @@
raise NotImplementedError("Unable to make TextInfo: " + str(e))
else:
self.reader = api.getReviewPosition()
+ # #10899: SayAll profile can't be activated earlier because they may not be anything to read
+ self.trigger.enter()
self.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)
self.numBufferedLines = 0
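The shape of this fix is a general defensive pattern: any attribute that cleanup code such as `stop()` or `__del__` touches must be given a safe default before the first statement in `__init__` that can raise. A minimal standalone sketch of the pattern (illustrative names, not NVDA's real API):

```python
class SafeCleanupExample:
    def __init__(self, make_reader):
        self.reader = None            # safe default before anything that can raise
        self.reader = make_reader()   # this call may raise NotImplementedError
        # enter the profile trigger only here, after construction succeeded

    def stop(self):
        if not self.reader:
            return                    # nothing to clean up yet
        self.reader = None

    def __del__(self):
        self.stop()                   # no AttributeError even if __init__ failed early
```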
|
{"golden_diff": "diff --git a/source/sayAllHandler.py b/source/sayAllHandler.py\n--- a/source/sayAllHandler.py\n+++ b/source/sayAllHandler.py\n@@ -109,7 +109,7 @@\n \tdef __init__(self, cursor):\r\n \t\tself.cursor = cursor\r\n \t\tself.trigger = SayAllProfileTrigger()\r\n-\t\tself.trigger.enter()\r\n+\t\tself.reader = None\r\n \t\t# Start at the cursor.\r\n \t\tif cursor == CURSOR_CARET:\r\n \t\t\ttry:\r\n@@ -118,6 +118,8 @@\n \t\t\t\traise NotImplementedError(\"Unable to make TextInfo: \" + str(e))\r\n \t\telse:\r\n \t\t\tself.reader = api.getReviewPosition()\r\n+\t\t# #10899: SayAll profile can't be activated earlier because they may not be anything to read\r\n+\t\tself.trigger.enter()\r\n \t\tself.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)\r\n \t\tself.numBufferedLines = 0\n", "issue": "Say all on Desktop raises an error\n### Steps to reproduce:\r\n1. Focus the desktop.\r\n2. Invoke caret say all\r\n\r\n### Actual behavior:\r\nThe following error is raised:\r\n\r\n```\r\nERROR - scriptHandler.executeScript (15:54:57.769):\r\nerror executing script: <bound method GlobalCommands.script_sayAll of <globalCommands.GlobalCommands object at 0x05875770>> with gesture 'NVDA+a'\r\nTraceback (most recent call last):\r\n File \"scriptHandler.pyc\", line 190, in executeScript\r\n File \"globalCommands.pyc\", line 1334, in script_sayAll\r\n File \"sayAllHandler.pyc\", line 79, in readText\r\n File \"sayAllHandler.pyc\", line 119, in nextLine\r\nAttributeError: '_TextReader' object has no attribute 'reader'\r\nERROR - stderr (15:54:57.779):\r\nException ignored in:\r\nERROR - stderr (15:54:57.790):\r\n<function _TextReader.__del__ at 0x0462F390>\r\nERROR - stderr (15:54:57.803):\r\nTraceback (most recent call last):\r\nERROR - stderr (15:54:57.815):\r\n File \"sayAllHandler.pyc\", line 213, in __del__\r\nERROR - stderr (15:54:57.827):\r\n File \"sayAllHandler.pyc\", line 206, in stop\r\nERROR - stderr (15:54:57.839):\r\nAttributeError\r\nERROR - stderr (15:54:57.851):\r\n:\r\nERROR - stderr (15:54:57.863):\r\n'_TextReader' object has no attribute 'reader'\r\n```\r\n\r\n### Expected behavior:\r\nNO error\r\n\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\nInstalled\r\n\r\n#### NVDA version:\r\nthreshold-18069\r\n\r\n#### Windows version:\r\nWindows 10 1903 build 18362.239\r\n\n", "before_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2017 NV Access Limited\r\n# This file may be used under the terms of the GNU General Public License, version 2 or later.\r\n# For more details see: https://www.gnu.org/licenses/gpl-2.0.html\r\n\r\nimport weakref\r\nimport speech\r\nimport synthDriverHandler\r\nfrom logHandler import log\r\nimport config\r\nimport controlTypes\r\nimport api\r\nimport textInfos\r\nimport queueHandler\r\nimport winKernel\r\n\r\nCURSOR_CARET = 0\r\nCURSOR_REVIEW = 1\r\n\r\nlastSayAllMode = None\r\n#: The active say all manager.\r\n#: This is a weakref because the manager should be allowed to die once say all is complete.\r\n_activeSayAll = lambda: None # Return None when called like a dead weakref.\r\n\r\ndef stop():\r\n\tactive = _activeSayAll()\r\n\tif active:\r\n\t\tactive.stop()\r\n\r\ndef isRunning():\r\n\t\"\"\"Determine whether say all is currently running.\r\n\t@return: C{True} if say all is currently running, C{False} if not.\r\n\t@rtype: bool\r\n\t\"\"\"\r\n\treturn bool(_activeSayAll())\r\n\r\ndef readObjects(obj):\r\n\tglobal _activeSayAll\r\n\treader = 
_ObjectsReader(obj)\r\n\t_activeSayAll = weakref.ref(reader)\r\n\treader.next()\r\n\r\nclass _ObjectsReader(object):\r\n\r\n\tdef __init__(self, root):\r\n\t\tself.walker = self.walk(root)\r\n\t\tself.prevObj = None\r\n\r\n\tdef walk(self, obj):\r\n\t\tyield obj\r\n\t\tchild=obj.simpleFirstChild\r\n\t\twhile child:\r\n\t\t\tfor descendant in self.walk(child):\r\n\t\t\t\tyield descendant\r\n\t\t\tchild=child.simpleNext\r\n\r\n\tdef next(self):\r\n\t\tif not self.walker:\r\n\t\t\t# We were stopped.\r\n\t\t\treturn\r\n\t\tif self.prevObj:\r\n\t\t\t# We just started speaking this object, so move the navigator to it.\r\n\t\t\tapi.setNavigatorObject(self.prevObj, isFocus=lastSayAllMode==CURSOR_CARET)\r\n\t\t\twinKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)\r\n\t\t# Move onto the next object.\r\n\t\tself.prevObj = obj = next(self.walker, None)\r\n\t\tif not obj:\r\n\t\t\treturn\r\n\t\t# Call this method again when we start speaking this object.\r\n\t\tcallbackCommand = speech.CallbackCommand(self.next, name=\"say-all:next\")\r\n\t\tspeech.speakObject(obj, reason=controlTypes.REASON_SAYALL, _prefixSpeechCommand=callbackCommand)\r\n\r\n\tdef stop(self):\r\n\t\tself.walker = None\r\n\r\ndef readText(cursor):\r\n\tglobal lastSayAllMode, _activeSayAll\r\n\tlastSayAllMode=cursor\r\n\ttry:\r\n\t\treader = _TextReader(cursor)\r\n\texcept NotImplementedError:\r\n\t\tlog.debugWarning(\"Unable to make reader\", exc_info=True)\r\n\t\treturn\r\n\t_activeSayAll = weakref.ref(reader)\r\n\treader.nextLine()\r\n\r\nclass _TextReader(object):\r\n\t\"\"\"Manages continuous reading of text.\r\n\tThis is intended for internal use only.\r\n\r\n\tThe high level flow of control is as follows:\r\n\t1. The constructor sets things up.\r\n\t2. L{nextLine} is called to read the first line.\r\n\t3. When it speaks a line, L{nextLine} request that L{lineReached} be called\r\n\t\twhen we start speaking this line, providing the position and state at this point.\r\n\t4. When we start speaking a line, L{lineReached} is called\r\n\t\tand moves the cursor to that line.\r\n\t5. L{lineReached} calls L{nextLine}.\r\n\t6. If there are more lines, L{nextLine} works as per steps 3 and 4.\r\n\t7. Otherwise, if the object doesn't support page turns, we're finished.\r\n\t8. If the object does support page turns,\r\n\t\twe request that L{turnPage} be called when speech is finished.\r\n\t9. L{turnPage} tries to turn the page.\r\n\t10. If there are no more pages, we're finished.\r\n\t11. 
If there is another page, L{turnPage} calls L{nextLine}.\r\n\t\"\"\"\r\n\tMAX_BUFFERED_LINES = 10\r\n\r\n\tdef __init__(self, cursor):\r\n\t\tself.cursor = cursor\r\n\t\tself.trigger = SayAllProfileTrigger()\r\n\t\tself.trigger.enter()\r\n\t\t# Start at the cursor.\r\n\t\tif cursor == CURSOR_CARET:\r\n\t\t\ttry:\r\n\t\t\t\tself.reader = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)\r\n\t\t\texcept (NotImplementedError, RuntimeError) as e:\r\n\t\t\t\traise NotImplementedError(\"Unable to make TextInfo: \" + str(e))\r\n\t\telse:\r\n\t\t\tself.reader = api.getReviewPosition()\r\n\t\tself.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)\r\n\t\tself.numBufferedLines = 0\r\n\r\n\tdef nextLine(self):\r\n\t\tif not self.reader:\r\n\t\t\tlog.debug(\"no self.reader\")\r\n\t\t\t# We were stopped.\r\n\t\t\treturn\r\n\t\tif not self.reader.obj:\r\n\t\t\tlog.debug(\"no self.reader.obj\")\r\n\t\t\t# The object died, so we should too.\r\n\t\t\tself.finish()\r\n\t\t\treturn\r\n\t\tbookmark = self.reader.bookmark\r\n\t\t# Expand to the current line.\r\n\t\t# We use move end rather than expand\r\n\t\t# because the user might start in the middle of a line\r\n\t\t# and we don't want to read from the start of the line in that case.\r\n\t\t# For lines after the first, it's also more efficient because\r\n\t\t# we're already at the start of the line, so there's no need to search backwards.\r\n\t\tdelta = self.reader.move(textInfos.UNIT_READINGCHUNK, 1, endPoint=\"end\")\r\n\t\tif delta <= 0:\r\n\t\t\t# No more text.\r\n\t\t\tif isinstance(self.reader.obj, textInfos.DocumentWithPageTurns):\r\n\t\t\t\t# Once the last line finishes reading, try turning the page.\r\n\t\t\t\tcb = speech.CallbackCommand(self.turnPage, name=\"say-all:turnPage\")\r\n\t\t\t\tspeech.speakWithoutPauses([cb, speech.EndUtteranceCommand()])\r\n\t\t\telse:\r\n\t\t\t\tself.finish()\r\n\t\t\treturn\r\n\r\n\t\t# Copy the speakTextInfoState so that speak callbackCommand\r\n\t\t# and its associated callback are using a copy isolated to this specific line.\r\n\t\tstate = self.speakTextInfoState.copy()\r\n\t\t# Call lineReached when we start speaking this line.\r\n\t\t# lineReached will move the cursor and trigger reading of the next line.\r\n\r\n\t\tdef _onLineReached(obj=self.reader.obj, state=state):\r\n\t\t\tself.lineReached(obj, bookmark, state)\r\n\r\n\t\tcb = speech.CallbackCommand(\r\n\t\t\t_onLineReached,\r\n\t\t\tname=\"say-all:lineReached\"\r\n\t\t)\r\n\r\n\t\t# Generate the speech sequence for the reader textInfo\r\n\t\t# and insert the lineReached callback at the very beginning of the sequence.\r\n\t\t# _linePrefix on speakTextInfo cannot be used here\r\n\t\t# As it would be inserted in the sequence after all initial control starts which is too late.\r\n\t\tspeechGen = speech.getTextInfoSpeech(\r\n\t\t\tself.reader,\r\n\t\t\tunit=textInfos.UNIT_READINGCHUNK,\r\n\t\t\treason=controlTypes.REASON_SAYALL,\r\n\t\t\tuseCache=state\r\n\t\t)\r\n\t\tseq = list(speech._flattenNestedSequences(speechGen))\r\n\t\tseq.insert(0, cb)\r\n\t\t# Speak the speech sequence.\r\n\t\tspoke = speech.speakWithoutPauses(seq)\r\n\t\t# Update the textInfo state ready for when speaking the next line.\r\n\t\tself.speakTextInfoState = state.copy()\r\n\r\n\t\t# Collapse to the end of this line, ready to read the next.\r\n\t\ttry:\r\n\t\t\tself.reader.collapse(end=True)\r\n\t\texcept RuntimeError:\r\n\t\t\t# This occurs in Microsoft Word when the range covers the end of the document.\r\n\t\t\t# without this exception to indicate that further 
collapsing is not possible, say all could enter an infinite loop.\r\n\t\t\tself.finish()\r\n\t\t\treturn\r\n\t\tif not spoke:\r\n\t\t\t# This line didn't include a natural pause, so nothing was spoken.\r\n\t\t\tself.numBufferedLines += 1\r\n\t\t\tif self.numBufferedLines < self.MAX_BUFFERED_LINES:\r\n\t\t\t\t# Move on to the next line.\r\n\t\t\t\t# We queue this to allow the user a chance to stop say all.\r\n\t\t\t\tqueueHandler.queueFunction(queueHandler.eventQueue, self.nextLine)\r\n\t\t\telse:\r\n\t\t\t\t# We don't want to buffer too much.\r\n\t\t\t\t# Force speech. lineReached will resume things when speech catches up.\r\n\t\t\t\tspeech.speakWithoutPauses(None)\r\n\t\t\t\t# The first buffered line has now started speaking.\r\n\t\t\t\tself.numBufferedLines -= 1\r\n\r\n\tdef lineReached(self, obj, bookmark, state):\r\n\t\t# We've just started speaking this line, so move the cursor there.\r\n\t\tstate.updateObj()\r\n\t\tupdater = obj.makeTextInfo(bookmark)\r\n\t\tif self.cursor == CURSOR_CARET:\r\n\t\t\tupdater.updateCaret()\r\n\t\tif self.cursor != CURSOR_CARET or config.conf[\"reviewCursor\"][\"followCaret\"]:\r\n\t\t\tapi.setReviewPosition(updater, isCaret=self.cursor==CURSOR_CARET)\r\n\t\twinKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)\r\n\t\tif self.numBufferedLines == 0:\r\n\t\t\t# This was the last line spoken, so move on.\r\n\t\t\tself.nextLine()\r\n\t\telse:\r\n\t\t\tself.numBufferedLines -= 1\r\n\r\n\tdef turnPage(self):\r\n\t\ttry:\r\n\t\t\tself.reader.obj.turnPage()\r\n\t\texcept RuntimeError:\r\n\t\t\tlog.debug(\"No more pages\")\r\n\t\t\t# No more pages.\r\n\t\t\tself.stop()\r\n\t\t\treturn\r\n\t\tself.reader = self.reader.obj.makeTextInfo(textInfos.POSITION_FIRST)\r\n\t\tself.nextLine()\r\n\r\n\tdef finish(self):\r\n\t\t# There is no more text.\r\n\t\t# Call stop to clean up, but only after speech completely finishes.\r\n\t\t# Otherwise, if a different synth is being used for say all,\r\n\t\t# we might switch synths too early and truncate the final speech.\r\n\t\t# We do this by putting a CallbackCommand at the start of a new utterance.\r\n\t\tcb = speech.CallbackCommand(self.stop, name=\"say-all:stop\")\r\n\t\tspeech.speakWithoutPauses([\r\n\t\t\tspeech.EndUtteranceCommand(),\r\n\t\t\tcb,\r\n\t\t\tspeech.EndUtteranceCommand()\r\n\t\t])\r\n\r\n\tdef stop(self):\r\n\t\tif not self.reader:\r\n\t\t\treturn\r\n\t\tself.reader = None\r\n\t\tself.trigger.exit()\r\n\t\tself.trigger = None\r\n\r\n\tdef __del__(self):\r\n\t\tself.stop()\r\n\r\nclass SayAllProfileTrigger(config.ProfileTrigger):\r\n\t\"\"\"A configuration profile trigger for when say all is in progress.\r\n\t\"\"\"\r\n\tspec = \"sayAll\"\r\n", "path": "source/sayAllHandler.py"}], "after_files": [{"content": "# A part of NonVisual Desktop Access (NVDA)\r\n# Copyright (C) 2006-2017 NV Access Limited\r\n# This file may be used under the terms of the GNU General Public License, version 2 or later.\r\n# For more details see: https://www.gnu.org/licenses/gpl-2.0.html\r\n\r\nimport weakref\r\nimport speech\r\nimport synthDriverHandler\r\nfrom logHandler import log\r\nimport config\r\nimport controlTypes\r\nimport api\r\nimport textInfos\r\nimport queueHandler\r\nimport winKernel\r\n\r\nCURSOR_CARET = 0\r\nCURSOR_REVIEW = 1\r\n\r\nlastSayAllMode = None\r\n#: The active say all manager.\r\n#: This is a weakref because the manager should be allowed to die once say all is complete.\r\n_activeSayAll = lambda: None # Return None when called like a dead weakref.\r\n\r\ndef stop():\r\n\tactive = 
_activeSayAll()\r\n\tif active:\r\n\t\tactive.stop()\r\n\r\ndef isRunning():\r\n\t\"\"\"Determine whether say all is currently running.\r\n\t@return: C{True} if say all is currently running, C{False} if not.\r\n\t@rtype: bool\r\n\t\"\"\"\r\n\treturn bool(_activeSayAll())\r\n\r\ndef readObjects(obj):\r\n\tglobal _activeSayAll\r\n\treader = _ObjectsReader(obj)\r\n\t_activeSayAll = weakref.ref(reader)\r\n\treader.next()\r\n\r\nclass _ObjectsReader(object):\r\n\r\n\tdef __init__(self, root):\r\n\t\tself.walker = self.walk(root)\r\n\t\tself.prevObj = None\r\n\r\n\tdef walk(self, obj):\r\n\t\tyield obj\r\n\t\tchild=obj.simpleFirstChild\r\n\t\twhile child:\r\n\t\t\tfor descendant in self.walk(child):\r\n\t\t\t\tyield descendant\r\n\t\t\tchild=child.simpleNext\r\n\r\n\tdef next(self):\r\n\t\tif not self.walker:\r\n\t\t\t# We were stopped.\r\n\t\t\treturn\r\n\t\tif self.prevObj:\r\n\t\t\t# We just started speaking this object, so move the navigator to it.\r\n\t\t\tapi.setNavigatorObject(self.prevObj, isFocus=lastSayAllMode==CURSOR_CARET)\r\n\t\t\twinKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)\r\n\t\t# Move onto the next object.\r\n\t\tself.prevObj = obj = next(self.walker, None)\r\n\t\tif not obj:\r\n\t\t\treturn\r\n\t\t# Call this method again when we start speaking this object.\r\n\t\tcallbackCommand = speech.CallbackCommand(self.next, name=\"say-all:next\")\r\n\t\tspeech.speakObject(obj, reason=controlTypes.REASON_SAYALL, _prefixSpeechCommand=callbackCommand)\r\n\r\n\tdef stop(self):\r\n\t\tself.walker = None\r\n\r\ndef readText(cursor):\r\n\tglobal lastSayAllMode, _activeSayAll\r\n\tlastSayAllMode=cursor\r\n\ttry:\r\n\t\treader = _TextReader(cursor)\r\n\texcept NotImplementedError:\r\n\t\tlog.debugWarning(\"Unable to make reader\", exc_info=True)\r\n\t\treturn\r\n\t_activeSayAll = weakref.ref(reader)\r\n\treader.nextLine()\r\n\r\nclass _TextReader(object):\r\n\t\"\"\"Manages continuous reading of text.\r\n\tThis is intended for internal use only.\r\n\r\n\tThe high level flow of control is as follows:\r\n\t1. The constructor sets things up.\r\n\t2. L{nextLine} is called to read the first line.\r\n\t3. When it speaks a line, L{nextLine} request that L{lineReached} be called\r\n\t\twhen we start speaking this line, providing the position and state at this point.\r\n\t4. When we start speaking a line, L{lineReached} is called\r\n\t\tand moves the cursor to that line.\r\n\t5. L{lineReached} calls L{nextLine}.\r\n\t6. If there are more lines, L{nextLine} works as per steps 3 and 4.\r\n\t7. Otherwise, if the object doesn't support page turns, we're finished.\r\n\t8. If the object does support page turns,\r\n\t\twe request that L{turnPage} be called when speech is finished.\r\n\t9. L{turnPage} tries to turn the page.\r\n\t10. If there are no more pages, we're finished.\r\n\t11. 
If there is another page, L{turnPage} calls L{nextLine}.\r\n\t\"\"\"\r\n\tMAX_BUFFERED_LINES = 10\r\n\r\n\tdef __init__(self, cursor):\r\n\t\tself.cursor = cursor\r\n\t\tself.trigger = SayAllProfileTrigger()\r\n\t\tself.reader = None\r\n\t\t# Start at the cursor.\r\n\t\tif cursor == CURSOR_CARET:\r\n\t\t\ttry:\r\n\t\t\t\tself.reader = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)\r\n\t\t\texcept (NotImplementedError, RuntimeError) as e:\r\n\t\t\t\traise NotImplementedError(\"Unable to make TextInfo: \" + str(e))\r\n\t\telse:\r\n\t\t\tself.reader = api.getReviewPosition()\r\n\t\t# #10899: SayAll profile can't be activated earlier because they may not be anything to read\r\n\t\tself.trigger.enter()\r\n\t\tself.speakTextInfoState = speech.SpeakTextInfoState(self.reader.obj)\r\n\t\tself.numBufferedLines = 0\r\n\r\n\tdef nextLine(self):\r\n\t\tif not self.reader:\r\n\t\t\tlog.debug(\"no self.reader\")\r\n\t\t\t# We were stopped.\r\n\t\t\treturn\r\n\t\tif not self.reader.obj:\r\n\t\t\tlog.debug(\"no self.reader.obj\")\r\n\t\t\t# The object died, so we should too.\r\n\t\t\tself.finish()\r\n\t\t\treturn\r\n\t\tbookmark = self.reader.bookmark\r\n\t\t# Expand to the current line.\r\n\t\t# We use move end rather than expand\r\n\t\t# because the user might start in the middle of a line\r\n\t\t# and we don't want to read from the start of the line in that case.\r\n\t\t# For lines after the first, it's also more efficient because\r\n\t\t# we're already at the start of the line, so there's no need to search backwards.\r\n\t\tdelta = self.reader.move(textInfos.UNIT_READINGCHUNK, 1, endPoint=\"end\")\r\n\t\tif delta <= 0:\r\n\t\t\t# No more text.\r\n\t\t\tif isinstance(self.reader.obj, textInfos.DocumentWithPageTurns):\r\n\t\t\t\t# Once the last line finishes reading, try turning the page.\r\n\t\t\t\tcb = speech.CallbackCommand(self.turnPage, name=\"say-all:turnPage\")\r\n\t\t\t\tspeech.speakWithoutPauses([cb, speech.EndUtteranceCommand()])\r\n\t\t\telse:\r\n\t\t\t\tself.finish()\r\n\t\t\treturn\r\n\r\n\t\t# Copy the speakTextInfoState so that speak callbackCommand\r\n\t\t# and its associated callback are using a copy isolated to this specific line.\r\n\t\tstate = self.speakTextInfoState.copy()\r\n\t\t# Call lineReached when we start speaking this line.\r\n\t\t# lineReached will move the cursor and trigger reading of the next line.\r\n\r\n\t\tdef _onLineReached(obj=self.reader.obj, state=state):\r\n\t\t\tself.lineReached(obj, bookmark, state)\r\n\r\n\t\tcb = speech.CallbackCommand(\r\n\t\t\t_onLineReached,\r\n\t\t\tname=\"say-all:lineReached\"\r\n\t\t)\r\n\r\n\t\t# Generate the speech sequence for the reader textInfo\r\n\t\t# and insert the lineReached callback at the very beginning of the sequence.\r\n\t\t# _linePrefix on speakTextInfo cannot be used here\r\n\t\t# As it would be inserted in the sequence after all initial control starts which is too late.\r\n\t\tspeechGen = speech.getTextInfoSpeech(\r\n\t\t\tself.reader,\r\n\t\t\tunit=textInfos.UNIT_READINGCHUNK,\r\n\t\t\treason=controlTypes.REASON_SAYALL,\r\n\t\t\tuseCache=state\r\n\t\t)\r\n\t\tseq = list(speech._flattenNestedSequences(speechGen))\r\n\t\tseq.insert(0, cb)\r\n\t\t# Speak the speech sequence.\r\n\t\tspoke = speech.speakWithoutPauses(seq)\r\n\t\t# Update the textInfo state ready for when speaking the next line.\r\n\t\tself.speakTextInfoState = state.copy()\r\n\r\n\t\t# Collapse to the end of this line, ready to read the next.\r\n\t\ttry:\r\n\t\t\tself.reader.collapse(end=True)\r\n\t\texcept RuntimeError:\r\n\t\t\t# This occurs 
in Microsoft Word when the range covers the end of the document.\r\n\t\t\t# without this exception to indicate that further collapsing is not possible, say all could enter an infinite loop.\r\n\t\t\tself.finish()\r\n\t\t\treturn\r\n\t\tif not spoke:\r\n\t\t\t# This line didn't include a natural pause, so nothing was spoken.\r\n\t\t\tself.numBufferedLines += 1\r\n\t\t\tif self.numBufferedLines < self.MAX_BUFFERED_LINES:\r\n\t\t\t\t# Move on to the next line.\r\n\t\t\t\t# We queue this to allow the user a chance to stop say all.\r\n\t\t\t\tqueueHandler.queueFunction(queueHandler.eventQueue, self.nextLine)\r\n\t\t\telse:\r\n\t\t\t\t# We don't want to buffer too much.\r\n\t\t\t\t# Force speech. lineReached will resume things when speech catches up.\r\n\t\t\t\tspeech.speakWithoutPauses(None)\r\n\t\t\t\t# The first buffered line has now started speaking.\r\n\t\t\t\tself.numBufferedLines -= 1\r\n\r\n\tdef lineReached(self, obj, bookmark, state):\r\n\t\t# We've just started speaking this line, so move the cursor there.\r\n\t\tstate.updateObj()\r\n\t\tupdater = obj.makeTextInfo(bookmark)\r\n\t\tif self.cursor == CURSOR_CARET:\r\n\t\t\tupdater.updateCaret()\r\n\t\tif self.cursor != CURSOR_CARET or config.conf[\"reviewCursor\"][\"followCaret\"]:\r\n\t\t\tapi.setReviewPosition(updater, isCaret=self.cursor==CURSOR_CARET)\r\n\t\twinKernel.SetThreadExecutionState(winKernel.ES_SYSTEM_REQUIRED)\r\n\t\tif self.numBufferedLines == 0:\r\n\t\t\t# This was the last line spoken, so move on.\r\n\t\t\tself.nextLine()\r\n\t\telse:\r\n\t\t\tself.numBufferedLines -= 1\r\n\r\n\tdef turnPage(self):\r\n\t\ttry:\r\n\t\t\tself.reader.obj.turnPage()\r\n\t\texcept RuntimeError:\r\n\t\t\tlog.debug(\"No more pages\")\r\n\t\t\t# No more pages.\r\n\t\t\tself.stop()\r\n\t\t\treturn\r\n\t\tself.reader = self.reader.obj.makeTextInfo(textInfos.POSITION_FIRST)\r\n\t\tself.nextLine()\r\n\r\n\tdef finish(self):\r\n\t\t# There is no more text.\r\n\t\t# Call stop to clean up, but only after speech completely finishes.\r\n\t\t# Otherwise, if a different synth is being used for say all,\r\n\t\t# we might switch synths too early and truncate the final speech.\r\n\t\t# We do this by putting a CallbackCommand at the start of a new utterance.\r\n\t\tcb = speech.CallbackCommand(self.stop, name=\"say-all:stop\")\r\n\t\tspeech.speakWithoutPauses([\r\n\t\t\tspeech.EndUtteranceCommand(),\r\n\t\t\tcb,\r\n\t\t\tspeech.EndUtteranceCommand()\r\n\t\t])\r\n\r\n\tdef stop(self):\r\n\t\tif not self.reader:\r\n\t\t\treturn\r\n\t\tself.reader = None\r\n\t\tself.trigger.exit()\r\n\t\tself.trigger = None\r\n\r\n\tdef __del__(self):\r\n\t\tself.stop()\r\n\r\nclass SayAllProfileTrigger(config.ProfileTrigger):\r\n\t\"\"\"A configuration profile trigger for when say all is in progress.\r\n\t\"\"\"\r\n\tspec = \"sayAll\"\r\n", "path": "source/sayAllHandler.py"}]}
| 3,721 | 211 |
gh_patches_debug_15530
|
rasdani/github-patches
|
git_diff
|
ibis-project__ibis-6950
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
bug(bigquery): memtable and string literals not escaping `\n` or `\` which results in invalid syntax
### What happened?
Code:
```python
import ibis
ibis_client = ibis.bigquery.connect()
table = ibis.memtable(
{
"col1": ["a\tb\nc", "d e f", "g'e\"h"],
}
)
print(ibis_client.compile(table))
```
Output:
```
SELECT t0.*
FROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b
c' AS col1), STRUCT('d e f' AS col1), STRUCT('g\'e"h' AS col1)]) t0
```
Note, the following SQL works as expected:
```
SELECT t0.*
FROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b\nc' AS col1), STRUCT('d e f' AS col1), STRUCT('g\'e"h' AS col1)]) t0
```
Therefore, we should really be escaping `\n` in addition to `'`. Though, perhaps there are other characters that could break BigQuery syntax? See: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#string_and_bytes_literals
Alternatively, using triple-quoted strings allows for newline characters in the string literal itself.
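As a rough sketch of the escaping being asked for (an illustrative helper, not ibis's actual code; the full set of characters to escape would follow the lexical rules linked above):

```python
def escape_bigquery_string(value: str) -> str:
    value = value.replace("\\", "\\\\")   # escape backslash first so it is not doubled later
    value = value.replace("'", "\\'")
    value = value.replace("\n", "\\n")
    value = value.replace("\t", "\\t")
    return "'{}'".format(value)

print(escape_bigquery_string("a\tb\nc"))  # prints 'a\tb\nc' with escape sequences, no raw newline
```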
### What version of ibis are you using?
6.1.0
also tested on latest commit: 15f8d9575
### What backend(s) are you using, if any?
BigQuery
### Relevant log output
```sh
BigQuery API: Syntax error: Unclosed string literal at [2:47]
```
### Code of Conduct
- [X] I agree to follow this project's Code of Conduct
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ibis/backends/base/sql/registry/literal.py`
Content:
```
1 from __future__ import annotations
2
3 import datetime
4 import math
5
6 import ibis.expr.types as ir
7
8
9 def _set_literal_format(translator, expr):
10 value_type = expr.type().value_type
11
12 formatted = [
13 translator.translate(ir.literal(x, type=value_type)) for x in expr.op().value
14 ]
15
16 return "(" + ", ".join(formatted) + ")"
17
18
19 def _boolean_literal_format(translator, op):
20 return "TRUE" if op.value else "FALSE"
21
22
23 def _string_literal_format(translator, op):
24 return "'{}'".format(op.value.replace("'", "\\'"))
25
26
27 def _number_literal_format(translator, op):
28 if math.isfinite(op.value):
29 formatted = repr(op.value)
30 else:
31 if math.isnan(op.value):
32 formatted_val = "NaN"
33 elif math.isinf(op.value):
34 if op.value > 0:
35 formatted_val = "Infinity"
36 else:
37 formatted_val = "-Infinity"
38 formatted = f"CAST({formatted_val!r} AS DOUBLE)"
39
40 return formatted
41
42
43 def _interval_literal_format(translator, op):
44 return f"INTERVAL {op.value} {op.dtype.resolution.upper()}"
45
46
47 def _date_literal_format(translator, op):
48 value = op.value
49 if isinstance(value, datetime.date):
50 value = value.strftime("%Y-%m-%d")
51
52 return repr(value)
53
54
55 def _timestamp_literal_format(translator, op):
56 value = op.value
57 if isinstance(value, datetime.datetime):
58 value = value.isoformat()
59
60 return repr(value)
61
62
63 literal_formatters = {
64 "boolean": _boolean_literal_format,
65 "number": _number_literal_format,
66 "string": _string_literal_format,
67 "interval": _interval_literal_format,
68 "timestamp": _timestamp_literal_format,
69 "date": _date_literal_format,
70 "set": _set_literal_format,
71 }
72
73
74 def literal(translator, op):
75 """Return the expression as its literal value."""
76
77 dtype = op.dtype
78
79 if op.value is None:
80 return "NULL"
81
82 if dtype.is_boolean():
83 typeclass = "boolean"
84 elif dtype.is_string():
85 typeclass = "string"
86 elif dtype.is_date():
87 typeclass = "date"
88 elif dtype.is_numeric():
89 typeclass = "number"
90 elif dtype.is_timestamp():
91 typeclass = "timestamp"
92 elif dtype.is_interval():
93 typeclass = "interval"
94 else:
95 raise NotImplementedError(f"Unsupported type: {dtype!r}")
96
97 return literal_formatters[typeclass](translator, op)
98
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ibis/backends/base/sql/registry/literal.py b/ibis/backends/base/sql/registry/literal.py
--- a/ibis/backends/base/sql/registry/literal.py
+++ b/ibis/backends/base/sql/registry/literal.py
@@ -21,7 +21,22 @@
def _string_literal_format(translator, op):
- return "'{}'".format(op.value.replace("'", "\\'"))
+ return "'{}'".format(
+ op.value
+ # Escape \ first so we don't double escape other characters.
+ .replace("\\", "\\\\")
+ # Escape ' since we're using those for the string literal.
+ .replace("'", "\\'")
+ # ASCII escape sequences that are recognized in Python:
+ # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals
+ .replace("\a", "\\a") # Bell
+ .replace("\b", "\\b") # Backspace
+ .replace("\f", "\\f") # Formfeed
+ .replace("\n", "\\n") # Newline / Linefeed
+ .replace("\r", "\\r") # Carriage return
+ .replace("\t", "\\t") # Tab
+ .replace("\v", "\\v") # Vertical tab
+ )
def _number_literal_format(translator, op):
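A quick way to sanity-check the new formatter, mirroring the reproduction from the issue (a hypothetical snippet, not part of the repo's test suite):

```python
import ibis

client = ibis.bigquery.connect()
table = ibis.memtable({"col1": ["a\tb\nc"]})
# With the patched _string_literal_format, the compiled SQL contains the
# escape sequences \t and \n instead of raw control characters.
print(client.compile(table))
```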
|
{"golden_diff": "diff --git a/ibis/backends/base/sql/registry/literal.py b/ibis/backends/base/sql/registry/literal.py\n--- a/ibis/backends/base/sql/registry/literal.py\n+++ b/ibis/backends/base/sql/registry/literal.py\n@@ -21,7 +21,22 @@\n \n \n def _string_literal_format(translator, op):\n- return \"'{}'\".format(op.value.replace(\"'\", \"\\\\'\"))\n+ return \"'{}'\".format(\n+ op.value\n+ # Escape \\ first so we don't double escape other characters.\n+ .replace(\"\\\\\", \"\\\\\\\\\")\n+ # Escape ' since we're using those for the string literal.\n+ .replace(\"'\", \"\\\\'\")\n+ # ASCII escape sequences that are recognized in Python:\n+ # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals\n+ .replace(\"\\a\", \"\\\\a\") # Bell\n+ .replace(\"\\b\", \"\\\\b\") # Backspace\n+ .replace(\"\\f\", \"\\\\f\") # Formfeed\n+ .replace(\"\\n\", \"\\\\n\") # Newline / Linefeed\n+ .replace(\"\\r\", \"\\\\r\") # Carriage return\n+ .replace(\"\\t\", \"\\\\t\") # Tab\n+ .replace(\"\\v\", \"\\\\v\") # Vertical tab\n+ )\n \n \n def _number_literal_format(translator, op):\n", "issue": "bug(bigquery): memtable and string literals not escaping `\\n` or `\\` which results in invalid syntax\n### What happened?\n\nCode:\r\n\r\n```python\r\nimport ibis\r\n\r\nibis_client = ibis.bigquery.connect()\r\ntable = ibis.memtable(\r\n {\r\n \"col1\": [\"a\\tb\\nc\", \"d e f\", \"g'e\\\"h\"],\r\n }\r\n)\r\nprint(ibis_client.compile(table))\r\n```\r\n\r\nOutput:\r\n\r\n```\r\nSELECT t0.*\r\nFROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b\r\nc' AS col1), STRUCT('d e f' AS col1), STRUCT('g\\'e\"h' AS col1)]) t0\r\n```\r\n\r\nNote, the following SQL works as expected:\r\n\r\n```\r\nSELECT t0.*\r\nFROM UNNEST(ARRAY<STRUCT<col1 STRING>>[STRUCT('a b\\nc' AS col1), STRUCT('d e f' AS col1), STRUCT('g\\'e\"h' AS col1)]) t0\r\n```\r\n\r\nTherefore, we should really be escaping `\\n` in addition to `'`. Though, perhaps there are other characters that could break BigQuery syntax? 
See: https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical#string_and_bytes_literals\r\n\r\nAlternatively, using triple-quoted strings allows for newline characters in the string literal itself.\n\n### What version of ibis are you using?\n\n6.1.0\r\n\r\nalso tested on latest commit: 15f8d9575\n\n### What backend(s) are you using, if any?\n\nBigQuery\n\n### Relevant log output\n\n```sh\nBigQuery API: Syntax error: Unclosed string literal at [2:47]\n```\n\n\n### Code of Conduct\n\n- [X] I agree to follow this project's Code of Conduct\n", "before_files": [{"content": "from __future__ import annotations\n\nimport datetime\nimport math\n\nimport ibis.expr.types as ir\n\n\ndef _set_literal_format(translator, expr):\n value_type = expr.type().value_type\n\n formatted = [\n translator.translate(ir.literal(x, type=value_type)) for x in expr.op().value\n ]\n\n return \"(\" + \", \".join(formatted) + \")\"\n\n\ndef _boolean_literal_format(translator, op):\n return \"TRUE\" if op.value else \"FALSE\"\n\n\ndef _string_literal_format(translator, op):\n return \"'{}'\".format(op.value.replace(\"'\", \"\\\\'\"))\n\n\ndef _number_literal_format(translator, op):\n if math.isfinite(op.value):\n formatted = repr(op.value)\n else:\n if math.isnan(op.value):\n formatted_val = \"NaN\"\n elif math.isinf(op.value):\n if op.value > 0:\n formatted_val = \"Infinity\"\n else:\n formatted_val = \"-Infinity\"\n formatted = f\"CAST({formatted_val!r} AS DOUBLE)\"\n\n return formatted\n\n\ndef _interval_literal_format(translator, op):\n return f\"INTERVAL {op.value} {op.dtype.resolution.upper()}\"\n\n\ndef _date_literal_format(translator, op):\n value = op.value\n if isinstance(value, datetime.date):\n value = value.strftime(\"%Y-%m-%d\")\n\n return repr(value)\n\n\ndef _timestamp_literal_format(translator, op):\n value = op.value\n if isinstance(value, datetime.datetime):\n value = value.isoformat()\n\n return repr(value)\n\n\nliteral_formatters = {\n \"boolean\": _boolean_literal_format,\n \"number\": _number_literal_format,\n \"string\": _string_literal_format,\n \"interval\": _interval_literal_format,\n \"timestamp\": _timestamp_literal_format,\n \"date\": _date_literal_format,\n \"set\": _set_literal_format,\n}\n\n\ndef literal(translator, op):\n \"\"\"Return the expression as its literal value.\"\"\"\n\n dtype = op.dtype\n\n if op.value is None:\n return \"NULL\"\n\n if dtype.is_boolean():\n typeclass = \"boolean\"\n elif dtype.is_string():\n typeclass = \"string\"\n elif dtype.is_date():\n typeclass = \"date\"\n elif dtype.is_numeric():\n typeclass = \"number\"\n elif dtype.is_timestamp():\n typeclass = \"timestamp\"\n elif dtype.is_interval():\n typeclass = \"interval\"\n else:\n raise NotImplementedError(f\"Unsupported type: {dtype!r}\")\n\n return literal_formatters[typeclass](translator, op)\n", "path": "ibis/backends/base/sql/registry/literal.py"}], "after_files": [{"content": "from __future__ import annotations\n\nimport datetime\nimport math\n\nimport ibis.expr.types as ir\n\n\ndef _set_literal_format(translator, expr):\n value_type = expr.type().value_type\n\n formatted = [\n translator.translate(ir.literal(x, type=value_type)) for x in expr.op().value\n ]\n\n return \"(\" + \", \".join(formatted) + \")\"\n\n\ndef _boolean_literal_format(translator, op):\n return \"TRUE\" if op.value else \"FALSE\"\n\n\ndef _string_literal_format(translator, op):\n return \"'{}'\".format(\n op.value\n # Escape \\ first so we don't double escape other characters.\n .replace(\"\\\\\", \"\\\\\\\\\")\n # Escape 
' since we're using those for the string literal.\n .replace(\"'\", \"\\\\'\")\n # ASCII escape sequences that are recognized in Python:\n # https://docs.python.org/3/reference/lexical_analysis.html#string-and-bytes-literals\n .replace(\"\\a\", \"\\\\a\") # Bell\n .replace(\"\\b\", \"\\\\b\") # Backspace\n .replace(\"\\f\", \"\\\\f\") # Formfeed\n .replace(\"\\n\", \"\\\\n\") # Newline / Linefeed\n .replace(\"\\r\", \"\\\\r\") # Carriage return\n .replace(\"\\t\", \"\\\\t\") # Tab\n .replace(\"\\v\", \"\\\\v\") # Vertical tab\n )\n\n\ndef _number_literal_format(translator, op):\n if math.isfinite(op.value):\n formatted = repr(op.value)\n else:\n if math.isnan(op.value):\n formatted_val = \"NaN\"\n elif math.isinf(op.value):\n if op.value > 0:\n formatted_val = \"Infinity\"\n else:\n formatted_val = \"-Infinity\"\n formatted = f\"CAST({formatted_val!r} AS DOUBLE)\"\n\n return formatted\n\n\ndef _interval_literal_format(translator, op):\n return f\"INTERVAL {op.value} {op.dtype.resolution.upper()}\"\n\n\ndef _date_literal_format(translator, op):\n value = op.value\n if isinstance(value, datetime.date):\n value = value.strftime(\"%Y-%m-%d\")\n\n return repr(value)\n\n\ndef _timestamp_literal_format(translator, op):\n value = op.value\n if isinstance(value, datetime.datetime):\n value = value.isoformat()\n\n return repr(value)\n\n\nliteral_formatters = {\n \"boolean\": _boolean_literal_format,\n \"number\": _number_literal_format,\n \"string\": _string_literal_format,\n \"interval\": _interval_literal_format,\n \"timestamp\": _timestamp_literal_format,\n \"date\": _date_literal_format,\n \"set\": _set_literal_format,\n}\n\n\ndef literal(translator, op):\n \"\"\"Return the expression as its literal value.\"\"\"\n\n dtype = op.dtype\n\n if op.value is None:\n return \"NULL\"\n\n if dtype.is_boolean():\n typeclass = \"boolean\"\n elif dtype.is_string():\n typeclass = \"string\"\n elif dtype.is_date():\n typeclass = \"date\"\n elif dtype.is_numeric():\n typeclass = \"number\"\n elif dtype.is_timestamp():\n typeclass = \"timestamp\"\n elif dtype.is_interval():\n typeclass = \"interval\"\n else:\n raise NotImplementedError(f\"Unsupported type: {dtype!r}\")\n\n return literal_formatters[typeclass](translator, op)\n", "path": "ibis/backends/base/sql/registry/literal.py"}]}
| 1,397 | 316 |
gh_patches_debug_9401
|
rasdani/github-patches
|
git_diff
|
vyperlang__vyper-1595
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add docs for assert_modifiable
### What's your issue about?
`assert_modifiable` was added in #1480; add docs for it.
### How can it be fixed?
`^.^`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `docs/conf.py`
Content:
```
1 #!/usr/bin/env python3
2 # -*- coding: utf-8 -*-
3 #
4 # Vyper documentation build configuration file, created by
5 # sphinx-quickstart on Wed Jul 26 11:18:29 2017.
6 #
7 # This file is execfile()d with the current directory set to its
8 # containing dir.
9 #
10 # Note that not all possible configuration values are present in this
11 # autogenerated file.
12 #
13 # All configuration values have a default; values that are commented out
14 # serve to show the default.
15
16 # If extensions (or modules to document with autodoc) are in another directory,
17 # add these directories to sys.path here. If the directory is relative to the
18 # documentation root, use os.path.abspath to make it absolute, like shown here.
19 #
20 # import os
21 # import sys
22 # sys.path.insert(0, os.path.abspath('.'))
23 from recommonmark.parser import CommonMarkParser
24
25 # TO DO - Create and Implement Vyper Lexer
26 # def setup(sphinx):
27 # sys.path.insert(0, os.path.abspath('./utils'))
28 # from SolidityLexer import SolidityLexer
29 # sphinx.add_lexer('Python', SolidityLexer())
30
31
32 # -- General configuration ------------------------------------------------
33
34 # If your documentation needs a minimal Sphinx version, state it here.
35 #
36 # needs_sphinx = '1.0'
37
38 # Add any Sphinx extension module names here, as strings. They can be
39 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
40 # ones.
41 extensions = [
42 'sphinx.ext.autodoc'
43 ]
44
45 # Add any paths that contain templates here, relative to this directory.
46 templates_path = ['_templates']
47
48 # The suffix(es) of source filenames.
49 # You can specify multiple suffix as a list of string:
50 #
51 # source_suffix = ['.rst', '.md']
52 source_suffix = '.rst'
53
54 # The master toctree document.
55 master_doc = 'index'
56
57 # General information about the project.
58 project = 'Vyper'
59 copyright = '2017, Vitalik Buterin'
60 author = 'Vitalik Buterin'
61
62 # The version info for the project you're documenting, acts as replacement for
63 # |version| and |release|, also used in various other places throughout the
64 # built documents.
65 #
66 # The short X.Y version.
67 version = ''
68 # The full version, including alpha/beta/rc tags.
69 release = ''
70
71 # The language for content autogenerated by Sphinx. Refer to documentation
72 # for a list of supported languages.
73 #
74 # This is also used if you do content translation via gettext catalogs.
75 # Usually you set "language" from the command line for these cases.
76 language = 'python'
77
78 # List of patterns, relative to source directory, that match files and
79 # directories to ignore when looking for source files.
80 # This patterns also effect to html_static_path and html_extra_path
81 exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
82
83 # The name of the Pygments (syntax highlighting) style to use.
84 pygments_style = 'sphinx'
85
86 # If true, `todo` and `todoList` produce output, else they produce nothing.
87 todo_include_todos = False
88
89
90 # -- Options for HTML output ----------------------------------------------
91
92 # The theme to use for HTML and HTML Help pages. See the documentation for
93 # a list of builtin themes.
94 #
95 html_theme = "sphinx_rtd_theme"
96
97 # Theme options are theme-specific and customize the look and feel of a theme
98 # further. For a list of options available for each theme, see the
99 # documentation.
100 #
101 # html_theme_options = {}
102
103 # Add any paths that contain custom static files (such as style sheets) here,
104 # relative to this directory. They are copied after the builtin static files,
105 # so a file named "default.css" will overwrite the builtin "default.css".
106 html_static_path = ['_static']
107
108 # Custom sidebar templates, must be a dictionary that maps document names
109 # to template names.
110 #
111 # This is required for the alabaster theme
112 # refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars
113 html_sidebars = {
114 '**': [
115 'about.html',
116 'navigation.html',
117 'relations.html', # needs 'show_related': True theme option to display
118 'searchbox.html',
119 'donate.html',
120 ]
121 }
122
123
124 # -- Options for HTMLHelp output ------------------------------------------
125
126 # Output file base name for HTML help builder.
127 htmlhelp_basename = 'Vyperdoc'
128
129
130 # -- Options for LaTeX output ---------------------------------------------
131
132 latex_elements = {
133 # The paper size ('letterpaper' or 'a4paper').
134 #
135 # 'papersize': 'letterpaper',
136
137 # The font size ('10pt', '11pt' or '12pt').
138 #
139 # 'pointsize': '10pt',
140
141 # Additional stuff for the LaTeX preamble.
142 #
143 # 'preamble': '',
144
145 # Latex figure (float) alignment
146 #
147 # 'figure_align': 'htbp',
148 }
149
150 # Grouping the document tree into LaTeX files. List of tuples
151 # (source start file, target name, title,
152 # author, documentclass [howto, manual, or own class]).
153 latex_documents = [
154 (master_doc, 'Vyper.tex', 'Vyper Documentation',
155 'Vitalik Buterin', 'manual'),
156 ]
157
158
159 # -- Options for manual page output ---------------------------------------
160
161 # One entry per manual page. List of tuples
162 # (source start file, name, description, authors, manual section).
163 man_pages = [
164 (master_doc, 'vyper', 'Vyper Documentation',
165 [author], 1)
166 ]
167
168
169 # -- Options for Texinfo output -------------------------------------------
170
171 # Grouping the document tree into Texinfo files. List of tuples
172 # (source start file, target name, title, author,
173 # dir menu entry, description, category)
174 texinfo_documents = [
175 (master_doc, 'Vyper', 'Vyper Documentation',
176 author, 'Vyper', 'One line description of project.',
177 'Miscellaneous'),
178 ]
179
180 source_parsers = {
181 '.md': CommonMarkParser,
182 }
183
184 source_suffix = ['.rst', '.md']
185
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/docs/conf.py b/docs/conf.py
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -103,7 +103,7 @@
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
+# html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
|
{"golden_diff": "diff --git a/docs/conf.py b/docs/conf.py\n--- a/docs/conf.py\n+++ b/docs/conf.py\n@@ -103,7 +103,7 @@\n # Add any paths that contain custom static files (such as style sheets) here,\n # relative to this directory. They are copied after the builtin static files,\n # so a file named \"default.css\" will overwrite the builtin \"default.css\".\n-html_static_path = ['_static']\n+# html_static_path = ['_static']\n \n # Custom sidebar templates, must be a dictionary that maps document names\n # to template names.\n", "issue": "Add docs for assert_modifiable\n\r\n### What's your issue about?\r\n\r\n`assert_modifiable` was added in #1480 add docs for it.\r\n\r\n### How can it be fixed?\r\n\r\n`^.^`\r\n\n", "before_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Vyper documentation build configuration file, created by\n# sphinx-quickstart on Wed Jul 26 11:18:29 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\nfrom recommonmark.parser import CommonMarkParser\n\n# TO DO - Create and Implement Vyper Lexer\n# def setup(sphinx):\n# sys.path.insert(0, os.path.abspath('./utils'))\n# from SolidityLexer import SolidityLexer\n# sphinx.add_lexer('Python', SolidityLexer())\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Vyper'\ncopyright = '2017, Vitalik Buterin'\nauthor = 'Vitalik Buterin'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = ''\n# The full version, including alpha/beta/rc tags.\nrelease = ''\n\n# The language for content autogenerated by Sphinx. 
Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = 'python'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'about.html',\n 'navigation.html',\n 'relations.html', # needs 'show_related': True theme option to display\n 'searchbox.html',\n 'donate.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Vyperdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Vyper.tex', 'Vyper Documentation',\n 'Vitalik Buterin', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'vyper', 'Vyper Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. 
List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Vyper', 'Vyper Documentation',\n author, 'Vyper', 'One line description of project.',\n 'Miscellaneous'),\n]\n\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nsource_suffix = ['.rst', '.md']\n", "path": "docs/conf.py"}], "after_files": [{"content": "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# Vyper documentation build configuration file, created by\n# sphinx-quickstart on Wed Jul 26 11:18:29 2017.\n#\n# This file is execfile()d with the current directory set to its\n# containing dir.\n#\n# Note that not all possible configuration values are present in this\n# autogenerated file.\n#\n# All configuration values have a default; values that are commented out\n# serve to show the default.\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\n# import os\n# import sys\n# sys.path.insert(0, os.path.abspath('.'))\nfrom recommonmark.parser import CommonMarkParser\n\n# TO DO - Create and Implement Vyper Lexer\n# def setup(sphinx):\n# sys.path.insert(0, os.path.abspath('./utils'))\n# from SolidityLexer import SolidityLexer\n# sphinx.add_lexer('Python', SolidityLexer())\n\n\n# -- General configuration ------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc'\n]\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n#\n# source_suffix = ['.rst', '.md']\nsource_suffix = '.rst'\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# General information about the project.\nproject = 'Vyper'\ncopyright = '2017, Vitalik Buterin'\nauthor = 'Vitalik Buterin'\n\n# The version info for the project you're documenting, acts as replacement for\n# |version| and |release|, also used in various other places throughout the\n# built documents.\n#\n# The short X.Y version.\nversion = ''\n# The full version, including alpha/beta/rc tags.\nrelease = ''\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = 'python'\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This patterns also effect to html_static_path and html_extra_path\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = False\n\n\n# -- Options for HTML output ----------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\n#\n# html_theme_options = {}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\n# html_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'about.html',\n 'navigation.html',\n 'relations.html', # needs 'show_related': True theme option to display\n 'searchbox.html',\n 'donate.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'Vyperdoc'\n\n\n# -- Options for LaTeX output ---------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (master_doc, 'Vyper.tex', 'Vyper Documentation',\n 'Vitalik Buterin', 'manual'),\n]\n\n\n# -- Options for manual page output ---------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (master_doc, 'vyper', 'Vyper Documentation',\n [author], 1)\n]\n\n\n# -- Options for Texinfo output -------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (master_doc, 'Vyper', 'Vyper Documentation',\n author, 'Vyper', 'One line description of project.',\n 'Miscellaneous'),\n]\n\nsource_parsers = {\n '.md': CommonMarkParser,\n}\n\nsource_suffix = ['.rst', '.md']\n", "path": "docs/conf.py"}]}
| 2,091 | 124 |
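
The tail end of the record above is a Sphinx `docs/conf.py` that wires recommonmark's `CommonMarkParser` into the build so Markdown pages can sit next to reStructuredText ones. A minimal, self-contained sketch of that configuration fragment follows; it assumes the `recommonmark` package is installed, only the names that appear in the record (`CommonMarkParser`, `source_parsers`, `source_suffix`) are taken from it, and the comments are illustrative rather than part of the original file.

```python
# Sketch of a Sphinx conf.py fragment that accepts both .rst and .md sources.
# Assumes the recommonmark package is installed; the rest of conf.py is omitted.
from recommonmark.parser import CommonMarkParser

# Route files ending in .md through the CommonMark parser.
source_parsers = {
    '.md': CommonMarkParser,
}

# Accept both suffixes; listing '.rst' first keeps existing pages untouched.
source_suffix = ['.rst', '.md']
```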
gh_patches_debug_33289
|
rasdani/github-patches
|
git_diff
|
optuna__optuna-2634
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Review constrain scikit-learn< 0.23
scikit-optimize 0.8.1 has been released which works together with scikit-learn >= 0.23
https://github.com/scikit-optimize/scikit-optimize/releases/tag/v0.8.1
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `setup.py`
Content:
```
1 import os
2 from typing import Dict
3 from typing import List
4 from typing import Optional
5
6 import pkg_resources
7 from setuptools import find_packages
8 from setuptools import setup
9
10
11 def get_version() -> str:
12
13 version_filepath = os.path.join(os.path.dirname(__file__), "optuna", "version.py")
14 with open(version_filepath) as f:
15 for line in f:
16 if line.startswith("__version__"):
17 return line.strip().split()[-1][1:-1]
18 assert False
19
20
21 def get_long_description() -> str:
22
23 readme_filepath = os.path.join(os.path.dirname(__file__), "README.md")
24 with open(readme_filepath) as f:
25 return f.read()
26
27
28 def get_install_requires() -> List[str]:
29
30 requirements = [
31 "alembic",
32 "cliff",
33 "cmaes>=0.8.2",
34 "colorlog",
35 "numpy",
36 "packaging>=20.0",
37 "scipy!=1.4.0",
38 "sqlalchemy>=1.1.0",
39 "tqdm",
40 ]
41 return requirements
42
43
44 def get_tests_require() -> List[str]:
45
46 return get_extras_require()["testing"]
47
48
49 def get_extras_require() -> Dict[str, List[str]]:
50
51 requirements = {
52 # TODO(HideakiImamura) Unpin mypy version after fixing "Duplicate modules" error in
53 # examples and tutorials.
54 "checking": ["black", "hacking", "isort", "mypy==0.790", "blackdoc"],
55 "codecov": ["codecov", "pytest-cov"],
56 "doctest": [
57 "cma",
58 "matplotlib>=3.0.0",
59 "pandas",
60 "plotly>=4.0.0",
61 "scikit-learn>=0.19.0,<0.23.0",
62 "scikit-optimize",
63 "mlflow",
64 ],
65 "document": [
66 # TODO(nzw): Remove the version constraint after resolving the issue
67 # https://github.com/optuna/optuna/issues/2658.
68 "sphinx<4.0.0",
69 "sphinx_rtd_theme",
70 "sphinx-copybutton",
71 "sphinx-gallery",
72 "sphinx-plotly-directive",
73 "pillow",
74 "matplotlib",
75 "scikit-learn",
76 "plotly>=4.0.0", # optuna/visualization.
77 "pandas",
78 "lightgbm",
79 "torch==1.8.0",
80 "torchvision==0.9.0",
81 "torchaudio==0.8.0",
82 "thop",
83 ],
84 "example": [
85 "nbval",
86 "scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'",
87 # optuna/visualization/param_importances.py.
88 "thop",
89 "torch==1.8.0 ; sys_platform=='darwin'",
90 "torch==1.8.0+cpu ; sys_platform!='darwin'",
91 "torchvision==0.9.0 ; sys_platform=='darwin'",
92 "torchvision==0.9.0+cpu ; sys_platform!='darwin'",
93 "torchaudio==0.8.0",
94 "botorch>=0.4.0 ; python_version>'3.6'",
95 "pandas",
96 "plotly",
97 "requests",
98 ],
99 "experimental": ["redis"],
100 "testing": [
101 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
102 # https://github.com/optuna/optuna/issues/1000.
103 "bokeh<2.0.0",
104 "chainer>=5.0.0",
105 "cma",
106 "fakeredis",
107 "lightgbm",
108 "matplotlib>=3.0.0",
109 "mlflow",
110 "mpi4py",
111 "mxnet",
112 "pandas",
113 "plotly>=4.0.0",
114 "pytest",
115 "scikit-learn>=0.19.0,<0.23.0",
116 "scikit-optimize",
117 "xgboost",
118 "keras",
119 # TODO(HideakiImamura): Remove the version constraint after resolving the issue
120 # https://github.com/keras-team/keras/issues/14632
121 "tensorflow<2.5.0 ; python_version<'3.9'",
122 "tensorflow-datasets",
123 "pytorch-ignite",
124 "pytorch-lightning>=1.0.2",
125 "skorch",
126 "catalyst>=21.3",
127 "torch==1.8.0 ; sys_platform=='darwin'",
128 "torch==1.8.0+cpu ; sys_platform!='darwin'",
129 "torchvision==0.9.0 ; sys_platform=='darwin'",
130 "torchvision==0.9.0+cpu ; sys_platform!='darwin'",
131 "torchaudio==0.8.0",
132 "allennlp>=2.2.0",
133 "botorch>=0.4.0 ; python_version>'3.6'",
134 "fastai",
135 ],
136 "tests": [
137 "fakeredis",
138 "pytest",
139 ],
140 "optional": [
141 "bokeh<2.0.0", # optuna/cli.py, optuna/dashboard.py.
142 "matplotlib>=3.0.0", # optuna/visualization/matplotlib
143 "pandas", # optuna/study.py
144 "plotly>=4.0.0", # optuna/visualization.
145 "redis", # optuna/storages/redis.py.
146 "scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'",
147 # optuna/visualization/param_importances.py.
148 ],
149 "integration": [
150 # TODO(toshihikoyanase): Remove the version constraint after resolving the issue
151 # https://github.com/optuna/optuna/issues/1000.
152 "chainer>=5.0.0",
153 "cma",
154 "lightgbm",
155 "mlflow",
156 "mpi4py",
157 "mxnet",
158 "pandas",
159 "scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'",
160 "scikit-optimize",
161 "xgboost",
162 "keras ; python_version<'3.9'",
163 # TODO(HideakiImamura): Remove the version constraint after resolving the issue
164 # https://github.com/keras-team/keras/issues/14632
165 "tensorflow<2.5.0 ; python_version<'3.9'",
166 "tensorflow-datasets ; python_version<'3.9'",
167 "pytorch-ignite",
168 "pytorch-lightning>=1.0.2",
169 "skorch",
170 "catalyst>=21.3",
171 "torch==1.8.0 ; sys_platform=='darwin'",
172 "torch==1.8.0+cpu ; sys_platform!='darwin'",
173 "torchvision==0.9.0 ; sys_platform=='darwin'",
174 "torchvision==0.9.0+cpu ; sys_platform!='darwin'",
175 "torchaudio==0.8.0",
176 "allennlp>=2.2.0",
177 "botorch>=0.4.0 ; python_version>'3.6'",
178 "fastai",
179 ],
180 }
181
182 return requirements
183
184
185 def find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:
186
187 for pkg in pkgs:
188 try:
189 return pkg_resources.get_distribution(pkg)
190 except pkg_resources.DistributionNotFound:
191 pass
192 return None
193
194
195 setup(
196 name="optuna",
197 version=get_version(),
198 description="A hyperparameter optimization framework",
199 long_description=get_long_description(),
200 long_description_content_type="text/markdown",
201 author="Takuya Akiba",
202 author_email="[email protected]",
203 url="https://optuna.org/",
204 packages=find_packages(exclude=("tests", "tests.*")),
205 package_data={
206 "optuna": [
207 "storages/_rdb/alembic.ini",
208 "storages/_rdb/alembic/*.*",
209 "storages/_rdb/alembic/versions/*.*",
210 "py.typed",
211 ]
212 },
213 python_requires=">=3.6",
214 install_requires=get_install_requires(),
215 tests_require=get_tests_require(),
216 extras_require=get_extras_require(),
217 entry_points={
218 "console_scripts": ["optuna = optuna.cli:main"],
219 "optuna.command": [
220 "create-study = optuna.cli:_CreateStudy",
221 "delete-study = optuna.cli:_DeleteStudy",
222 "study set-user-attr = optuna.cli:_StudySetUserAttribute",
223 "studies = optuna.cli:_Studies",
224 "dashboard = optuna.cli:_Dashboard",
225 "study optimize = optuna.cli:_StudyOptimize",
226 "storage upgrade = optuna.cli:_StorageUpgrade",
227 ],
228 },
229 classifiers=[
230 "Development Status :: 5 - Production/Stable",
231 "Intended Audience :: Science/Research",
232 "Intended Audience :: Developers",
233 "License :: OSI Approved :: MIT License",
234 "Programming Language :: Python :: 3",
235 "Programming Language :: Python :: 3.6",
236 "Programming Language :: Python :: 3.7",
237 "Programming Language :: Python :: 3.8",
238 "Programming Language :: Python :: 3.9",
239 "Programming Language :: Python :: 3 :: Only",
240 "Topic :: Scientific/Engineering",
241 "Topic :: Scientific/Engineering :: Mathematics",
242 "Topic :: Scientific/Engineering :: Artificial Intelligence",
243 "Topic :: Software Development",
244 "Topic :: Software Development :: Libraries",
245 "Topic :: Software Development :: Libraries :: Python Modules",
246 ],
247 )
248
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/setup.py b/setup.py
--- a/setup.py
+++ b/setup.py
@@ -58,7 +58,7 @@
"matplotlib>=3.0.0",
"pandas",
"plotly>=4.0.0",
- "scikit-learn>=0.19.0,<0.23.0",
+ "scikit-learn>=0.24.2",
"scikit-optimize",
"mlflow",
],
@@ -83,7 +83,7 @@
],
"example": [
"nbval",
- "scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'",
+ "scikit-learn>=0.24.2",
# optuna/visualization/param_importances.py.
"thop",
"torch==1.8.0 ; sys_platform=='darwin'",
@@ -112,7 +112,7 @@
"pandas",
"plotly>=4.0.0",
"pytest",
- "scikit-learn>=0.19.0,<0.23.0",
+ "scikit-learn>=0.24.2",
"scikit-optimize",
"xgboost",
"keras",
@@ -143,7 +143,7 @@
"pandas", # optuna/study.py
"plotly>=4.0.0", # optuna/visualization.
"redis", # optuna/storages/redis.py.
- "scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'",
+ "scikit-learn>=0.24.2",
# optuna/visualization/param_importances.py.
],
"integration": [
@@ -156,7 +156,7 @@
"mpi4py",
"mxnet",
"pandas",
- "scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'",
+ "scikit-learn>=0.24.2",
"scikit-optimize",
"xgboost",
"keras ; python_version<'3.9'",
|
{"golden_diff": "diff --git a/setup.py b/setup.py\n--- a/setup.py\n+++ b/setup.py\n@@ -58,7 +58,7 @@\n \"matplotlib>=3.0.0\",\n \"pandas\",\n \"plotly>=4.0.0\",\n- \"scikit-learn>=0.19.0,<0.23.0\",\n+ \"scikit-learn>=0.24.2\",\n \"scikit-optimize\",\n \"mlflow\",\n ],\n@@ -83,7 +83,7 @@\n ],\n \"example\": [\n \"nbval\",\n- \"scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'\",\n+ \"scikit-learn>=0.24.2\",\n # optuna/visualization/param_importances.py.\n \"thop\",\n \"torch==1.8.0 ; sys_platform=='darwin'\",\n@@ -112,7 +112,7 @@\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n- \"scikit-learn>=0.19.0,<0.23.0\",\n+ \"scikit-learn>=0.24.2\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras\",\n@@ -143,7 +143,7 @@\n \"pandas\", # optuna/study.py\n \"plotly>=4.0.0\", # optuna/visualization.\n \"redis\", # optuna/storages/redis.py.\n- \"scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'\",\n+ \"scikit-learn>=0.24.2\",\n # optuna/visualization/param_importances.py.\n ],\n \"integration\": [\n@@ -156,7 +156,7 @@\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n- \"scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'\",\n+ \"scikit-learn>=0.24.2\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras ; python_version<'3.9'\",\n", "issue": "Review constrain scikit-learn< 0.23\nscikit-optimize 0.8.1 has been released which works together with scikit-learn >= 0.23\r\n\r\nhttps://github.com/scikit-optimize/scikit-optimize/releases/tag/v0.8.1\n", "before_files": [{"content": "import os\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optuna\", \"version.py\")\n with open(version_filepath) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), \"README.md\")\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n requirements = [\n \"alembic\",\n \"cliff\",\n \"cmaes>=0.8.2\",\n \"colorlog\",\n \"numpy\",\n \"packaging>=20.0\",\n \"scipy!=1.4.0\",\n \"sqlalchemy>=1.1.0\",\n \"tqdm\",\n ]\n return requirements\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()[\"testing\"]\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n # TODO(HideakiImamura) Unpin mypy version after fixing \"Duplicate modules\" error in\n # examples and tutorials.\n \"checking\": [\"black\", \"hacking\", \"isort\", \"mypy==0.790\", \"blackdoc\"],\n \"codecov\": [\"codecov\", \"pytest-cov\"],\n \"doctest\": [\n \"cma\",\n \"matplotlib>=3.0.0\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"scikit-learn>=0.19.0,<0.23.0\",\n \"scikit-optimize\",\n \"mlflow\",\n ],\n \"document\": [\n # TODO(nzw): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/2658.\n \"sphinx<4.0.0\",\n \"sphinx_rtd_theme\",\n \"sphinx-copybutton\",\n \"sphinx-gallery\",\n \"sphinx-plotly-directive\",\n \"pillow\",\n \"matplotlib\",\n \"scikit-learn\",\n \"plotly>=4.0.0\", # optuna/visualization.\n \"pandas\",\n \"lightgbm\",\n \"torch==1.8.0\",\n \"torchvision==0.9.0\",\n \"torchaudio==0.8.0\",\n \"thop\",\n ],\n \"example\": [\n \"nbval\",\n \"scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'\",\n # optuna/visualization/param_importances.py.\n \"thop\",\n 
\"torch==1.8.0 ; sys_platform=='darwin'\",\n \"torch==1.8.0+cpu ; sys_platform!='darwin'\",\n \"torchvision==0.9.0 ; sys_platform=='darwin'\",\n \"torchvision==0.9.0+cpu ; sys_platform!='darwin'\",\n \"torchaudio==0.8.0\",\n \"botorch>=0.4.0 ; python_version>'3.6'\",\n \"pandas\",\n \"plotly\",\n \"requests\",\n ],\n \"experimental\": [\"redis\"],\n \"testing\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"bokeh<2.0.0\",\n \"chainer>=5.0.0\",\n \"cma\",\n \"fakeredis\",\n \"lightgbm\",\n \"matplotlib>=3.0.0\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n \"scikit-learn>=0.19.0,<0.23.0\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras\",\n # TODO(HideakiImamura): Remove the version constraint after resolving the issue\n # https://github.com/keras-team/keras/issues/14632\n \"tensorflow<2.5.0 ; python_version<'3.9'\",\n \"tensorflow-datasets\",\n \"pytorch-ignite\",\n \"pytorch-lightning>=1.0.2\",\n \"skorch\",\n \"catalyst>=21.3\",\n \"torch==1.8.0 ; sys_platform=='darwin'\",\n \"torch==1.8.0+cpu ; sys_platform!='darwin'\",\n \"torchvision==0.9.0 ; sys_platform=='darwin'\",\n \"torchvision==0.9.0+cpu ; sys_platform!='darwin'\",\n \"torchaudio==0.8.0\",\n \"allennlp>=2.2.0\",\n \"botorch>=0.4.0 ; python_version>'3.6'\",\n \"fastai\",\n ],\n \"tests\": [\n \"fakeredis\",\n \"pytest\",\n ],\n \"optional\": [\n \"bokeh<2.0.0\", # optuna/cli.py, optuna/dashboard.py.\n \"matplotlib>=3.0.0\", # optuna/visualization/matplotlib\n \"pandas\", # optuna/study.py\n \"plotly>=4.0.0\", # optuna/visualization.\n \"redis\", # optuna/storages/redis.py.\n \"scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'\",\n # optuna/visualization/param_importances.py.\n ],\n \"integration\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"chainer>=5.0.0\",\n \"cma\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"scikit-learn>=0.19.0,<0.23.0 ; python_version<'3.9'\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras ; python_version<'3.9'\",\n # TODO(HideakiImamura): Remove the version constraint after resolving the issue\n # https://github.com/keras-team/keras/issues/14632\n \"tensorflow<2.5.0 ; python_version<'3.9'\",\n \"tensorflow-datasets ; python_version<'3.9'\",\n \"pytorch-ignite\",\n \"pytorch-lightning>=1.0.2\",\n \"skorch\",\n \"catalyst>=21.3\",\n \"torch==1.8.0 ; sys_platform=='darwin'\",\n \"torch==1.8.0+cpu ; sys_platform!='darwin'\",\n \"torchvision==0.9.0 ; sys_platform=='darwin'\",\n \"torchvision==0.9.0+cpu ; sys_platform!='darwin'\",\n \"torchaudio==0.8.0\",\n \"allennlp>=2.2.0\",\n \"botorch>=0.4.0 ; python_version>'3.6'\",\n \"fastai\",\n ],\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\nsetup(\n name=\"optuna\",\n version=get_version(),\n description=\"A hyperparameter optimization framework\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Takuya Akiba\",\n author_email=\"[email protected]\",\n url=\"https://optuna.org/\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n package_data={\n \"optuna\": [\n \"storages/_rdb/alembic.ini\",\n 
\"storages/_rdb/alembic/*.*\",\n \"storages/_rdb/alembic/versions/*.*\",\n \"py.typed\",\n ]\n },\n python_requires=\">=3.6\",\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={\n \"console_scripts\": [\"optuna = optuna.cli:main\"],\n \"optuna.command\": [\n \"create-study = optuna.cli:_CreateStudy\",\n \"delete-study = optuna.cli:_DeleteStudy\",\n \"study set-user-attr = optuna.cli:_StudySetUserAttribute\",\n \"studies = optuna.cli:_Studies\",\n \"dashboard = optuna.cli:_Dashboard\",\n \"study optimize = optuna.cli:_StudyOptimize\",\n \"storage upgrade = optuna.cli:_StorageUpgrade\",\n ],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}], "after_files": [{"content": "import os\nfrom typing import Dict\nfrom typing import List\nfrom typing import Optional\n\nimport pkg_resources\nfrom setuptools import find_packages\nfrom setuptools import setup\n\n\ndef get_version() -> str:\n\n version_filepath = os.path.join(os.path.dirname(__file__), \"optuna\", \"version.py\")\n with open(version_filepath) as f:\n for line in f:\n if line.startswith(\"__version__\"):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ndef get_long_description() -> str:\n\n readme_filepath = os.path.join(os.path.dirname(__file__), \"README.md\")\n with open(readme_filepath) as f:\n return f.read()\n\n\ndef get_install_requires() -> List[str]:\n\n requirements = [\n \"alembic\",\n \"cliff\",\n \"cmaes>=0.8.2\",\n \"colorlog\",\n \"numpy\",\n \"packaging>=20.0\",\n \"scipy!=1.4.0\",\n \"sqlalchemy>=1.1.0\",\n \"tqdm\",\n ]\n return requirements\n\n\ndef get_tests_require() -> List[str]:\n\n return get_extras_require()[\"testing\"]\n\n\ndef get_extras_require() -> Dict[str, List[str]]:\n\n requirements = {\n # TODO(HideakiImamura) Unpin mypy version after fixing \"Duplicate modules\" error in\n # examples and tutorials.\n \"checking\": [\"black\", \"hacking\", \"isort\", \"mypy==0.790\", \"blackdoc\"],\n \"codecov\": [\"codecov\", \"pytest-cov\"],\n \"doctest\": [\n \"cma\",\n \"matplotlib>=3.0.0\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"scikit-learn>=0.24.2\",\n \"scikit-optimize\",\n \"mlflow\",\n ],\n \"document\": [\n # TODO(nzw): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/2658.\n \"sphinx<4.0.0\",\n \"sphinx_rtd_theme\",\n \"sphinx-copybutton\",\n \"sphinx-gallery\",\n \"sphinx-plotly-directive\",\n \"pillow\",\n \"matplotlib\",\n \"scikit-learn\",\n \"plotly>=4.0.0\", # optuna/visualization.\n \"pandas\",\n \"lightgbm\",\n \"torch==1.8.0\",\n \"torchvision==0.9.0\",\n \"torchaudio==0.8.0\",\n \"thop\",\n ],\n \"example\": [\n \"nbval\",\n \"scikit-learn>=0.24.2\",\n # 
optuna/visualization/param_importances.py.\n \"thop\",\n \"torch==1.8.0 ; sys_platform=='darwin'\",\n \"torch==1.8.0+cpu ; sys_platform!='darwin'\",\n \"torchvision==0.9.0 ; sys_platform=='darwin'\",\n \"torchvision==0.9.0+cpu ; sys_platform!='darwin'\",\n \"torchaudio==0.8.0\",\n \"botorch>=0.4.0 ; python_version>'3.6'\",\n \"pandas\",\n \"plotly\",\n \"requests\",\n ],\n \"experimental\": [\"redis\"],\n \"testing\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"bokeh<2.0.0\",\n \"chainer>=5.0.0\",\n \"cma\",\n \"fakeredis\",\n \"lightgbm\",\n \"matplotlib>=3.0.0\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"plotly>=4.0.0\",\n \"pytest\",\n \"scikit-learn>=0.24.2\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras\",\n # TODO(HideakiImamura): Remove the version constraint after resolving the issue\n # https://github.com/keras-team/keras/issues/14632\n \"tensorflow<2.5.0 ; python_version<'3.9'\",\n \"tensorflow-datasets\",\n \"pytorch-ignite\",\n \"pytorch-lightning>=1.0.2\",\n \"skorch\",\n \"catalyst>=21.3\",\n \"torch==1.8.0 ; sys_platform=='darwin'\",\n \"torch==1.8.0+cpu ; sys_platform!='darwin'\",\n \"torchvision==0.9.0 ; sys_platform=='darwin'\",\n \"torchvision==0.9.0+cpu ; sys_platform!='darwin'\",\n \"torchaudio==0.8.0\",\n \"allennlp>=2.2.0\",\n \"botorch>=0.4.0 ; python_version>'3.6'\",\n \"fastai\",\n ],\n \"tests\": [\n \"fakeredis\",\n \"pytest\",\n ],\n \"optional\": [\n \"bokeh<2.0.0\", # optuna/cli.py, optuna/dashboard.py.\n \"matplotlib>=3.0.0\", # optuna/visualization/matplotlib\n \"pandas\", # optuna/study.py\n \"plotly>=4.0.0\", # optuna/visualization.\n \"redis\", # optuna/storages/redis.py.\n \"scikit-learn>=0.24.2\",\n # optuna/visualization/param_importances.py.\n ],\n \"integration\": [\n # TODO(toshihikoyanase): Remove the version constraint after resolving the issue\n # https://github.com/optuna/optuna/issues/1000.\n \"chainer>=5.0.0\",\n \"cma\",\n \"lightgbm\",\n \"mlflow\",\n \"mpi4py\",\n \"mxnet\",\n \"pandas\",\n \"scikit-learn>=0.24.2\",\n \"scikit-optimize\",\n \"xgboost\",\n \"keras ; python_version<'3.9'\",\n # TODO(HideakiImamura): Remove the version constraint after resolving the issue\n # https://github.com/keras-team/keras/issues/14632\n \"tensorflow<2.5.0 ; python_version<'3.9'\",\n \"tensorflow-datasets ; python_version<'3.9'\",\n \"pytorch-ignite\",\n \"pytorch-lightning>=1.0.2\",\n \"skorch\",\n \"catalyst>=21.3\",\n \"torch==1.8.0 ; sys_platform=='darwin'\",\n \"torch==1.8.0+cpu ; sys_platform!='darwin'\",\n \"torchvision==0.9.0 ; sys_platform=='darwin'\",\n \"torchvision==0.9.0+cpu ; sys_platform!='darwin'\",\n \"torchaudio==0.8.0\",\n \"allennlp>=2.2.0\",\n \"botorch>=0.4.0 ; python_version>'3.6'\",\n \"fastai\",\n ],\n }\n\n return requirements\n\n\ndef find_any_distribution(pkgs: List[str]) -> Optional[pkg_resources.Distribution]:\n\n for pkg in pkgs:\n try:\n return pkg_resources.get_distribution(pkg)\n except pkg_resources.DistributionNotFound:\n pass\n return None\n\n\nsetup(\n name=\"optuna\",\n version=get_version(),\n description=\"A hyperparameter optimization framework\",\n long_description=get_long_description(),\n long_description_content_type=\"text/markdown\",\n author=\"Takuya Akiba\",\n author_email=\"[email protected]\",\n url=\"https://optuna.org/\",\n packages=find_packages(exclude=(\"tests\", \"tests.*\")),\n package_data={\n \"optuna\": [\n \"storages/_rdb/alembic.ini\",\n \"storages/_rdb/alembic/*.*\",\n 
\"storages/_rdb/alembic/versions/*.*\",\n \"py.typed\",\n ]\n },\n python_requires=\">=3.6\",\n install_requires=get_install_requires(),\n tests_require=get_tests_require(),\n extras_require=get_extras_require(),\n entry_points={\n \"console_scripts\": [\"optuna = optuna.cli:main\"],\n \"optuna.command\": [\n \"create-study = optuna.cli:_CreateStudy\",\n \"delete-study = optuna.cli:_DeleteStudy\",\n \"study set-user-attr = optuna.cli:_StudySetUserAttribute\",\n \"studies = optuna.cli:_Studies\",\n \"dashboard = optuna.cli:_Dashboard\",\n \"study optimize = optuna.cli:_StudyOptimize\",\n \"storage upgrade = optuna.cli:_StorageUpgrade\",\n ],\n },\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Intended Audience :: Science/Research\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3 :: Only\",\n \"Topic :: Scientific/Engineering\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n \"Topic :: Scientific/Engineering :: Artificial Intelligence\",\n \"Topic :: Software Development\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n)\n", "path": "setup.py"}]}
| 3,134 | 520 |
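
The Optuna record above turns on a single point: every `extras_require` group in `setup.py` pinned `scikit-learn>=0.19.0,<0.23.0` only because older scikit-optimize releases could not handle newer scikit-learn, and once scikit-optimize 0.8.1 lifted that restriction the golden diff relaxes the pin to `scikit-learn>=0.24.2` wherever scikit-optimize is also listed. A minimal sketch of that pattern is shown below; the package names and version strings are taken from the record, while `get_extras_require_sketch` is a hypothetical helper, not part of Optuna's real `setup.py`.

```python
# Sketch of an extras_require layout after relaxing the scikit-learn pin.
# scikit-optimize >= 0.8.1 works with scikit-learn >= 0.23, so the old upper
# bound ("<0.23.0") is dropped and only a floor is kept.
from typing import Dict, List


def get_extras_require_sketch() -> Dict[str, List[str]]:
    sklearn_requirement = "scikit-learn>=0.24.2"
    return {
        "doctest": [sklearn_requirement, "scikit-optimize"],
        "testing": [sklearn_requirement, "scikit-optimize", "pytest"],
        "integration": [sklearn_requirement, "scikit-optimize"],
    }


if __name__ == "__main__":
    for extra, deps in sorted(get_extras_require_sketch().items()):
        print(f"{extra}: {', '.join(deps)}")
```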
gh_patches_debug_6186
|
rasdani/github-patches
|
git_diff
|
ckan__ckan-4092
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Solr credential ENV vars not passed alongside CKAN_SOLR_URL
### CKAN Version if known (or site URL)
2.7.2
### Please describe the expected behaviour
Setting the following environment variables enables basic authentication for a Solr instance:
```
CKAN_SOLR_URL
CKAN_SOLR_USER
CKAN_SOLR_PASSWORD
```
### Please describe the actual behaviour
`CKAN_SOLR_URL` is honoured, but `CKAN_SOLR_USER` and `CKAN_SOLR_PASSWORD` are ignored and must be set in `ckan.ini`.
### What steps can be taken to reproduce the issue?
1. Set the following environment variables:
```
CKAN_SOLR_USER=my_missing_user
CKAN_SOLR_PASSWORD=my_password
```
2. Restart CKAN
3. Observe that nothing has changed
Alternatively, create a Solr instance with Basic authentication, set the correct credentials as environment variables, then restart CKAN and watch the Solr connection fail.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ckan/config/environment.py`
Content:
```
1 # encoding: utf-8
2
3 '''CKAN environment configuration'''
4 import os
5 import logging
6 import warnings
7 from urlparse import urlparse
8 import pytz
9
10 import sqlalchemy
11 from pylons import config as pylons_config
12 import formencode
13
14 import ckan.config.routing as routing
15 import ckan.model as model
16 import ckan.plugins as p
17 import ckan.lib.helpers as helpers
18 import ckan.lib.app_globals as app_globals
19 from ckan.lib.redis import is_redis_available
20 import ckan.lib.render as render
21 import ckan.lib.search as search
22 import ckan.logic as logic
23 import ckan.authz as authz
24 import ckan.lib.jinja_extensions as jinja_extensions
25 from ckan.lib.i18n import build_js_translations
26
27 from ckan.common import _, ungettext, config
28 from ckan.exceptions import CkanConfigurationException
29
30 log = logging.getLogger(__name__)
31
32
33 # Suppress benign warning 'Unbuilt egg for setuptools'
34 warnings.simplefilter('ignore', UserWarning)
35
36
37 def load_environment(global_conf, app_conf):
38 """
39 Configure the Pylons environment via the ``pylons.config`` object. This
40 code should only need to be run once.
41 """
42 # this must be run at a time when the env is semi-setup, thus inlined here.
43 # Required by the deliverance plugin and iATI
44 from pylons.wsgiapp import PylonsApp
45 import pkg_resources
46 find_controller_generic = PylonsApp.find_controller
47
48 # This is from pylons 1.0 source, will monkey-patch into 0.9.7
49 def find_controller(self, controller):
50 if controller in self.controller_classes:
51 return self.controller_classes[controller]
52 # Check to see if its a dotted name
53 if '.' in controller or ':' in controller:
54 ep = pkg_resources.EntryPoint.parse('x={0}'.format(controller))
55
56 if hasattr(ep, 'resolve'):
57 # setuptools >= 10.2
58 mycontroller = ep.resolve()
59 else:
60 # setuptools >= 11.3
61 mycontroller = ep.load(False)
62
63 self.controller_classes[controller] = mycontroller
64 return mycontroller
65 return find_controller_generic(self, controller)
66 PylonsApp.find_controller = find_controller
67
68 os.environ['CKAN_CONFIG'] = global_conf['__file__']
69
70 # Pylons paths
71 root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
72
73 valid_base_public_folder_names = ['public', 'public-bs2']
74 static_files = app_conf.get('ckan.base_public_folder', 'public')
75 app_conf['ckan.base_public_folder'] = static_files
76
77 if static_files not in valid_base_public_folder_names:
78 raise CkanConfigurationException(
79 'You provided an invalid value for ckan.base_public_folder. '
80 'Possible values are: "public" and "public-bs2".'
81 )
82
83 log.info('Loading static files from %s' % static_files)
84 paths = dict(root=root,
85 controllers=os.path.join(root, 'controllers'),
86 static_files=os.path.join(root, static_files),
87 templates=[])
88
89 # Initialize main CKAN config object
90 config.update(global_conf)
91 config.update(app_conf)
92
93 # Initialize Pylons own config object
94 pylons_config.init_app(global_conf, app_conf, package='ckan', paths=paths)
95
96 # Update the main CKAN config object with the Pylons specific stuff, as it
97 # quite hard to keep them separated. This should be removed once Pylons
98 # support is dropped
99 config.update(pylons_config)
100
101 # Setup the SQLAlchemy database engine
102 # Suppress a couple of sqlalchemy warnings
103 msgs = ['^Unicode type received non-unicode bind param value',
104 "^Did not recognize type 'BIGINT' of column 'size'",
105 "^Did not recognize type 'tsvector' of column 'search_vector'"
106 ]
107 for msg in msgs:
108 warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)
109
110 # load all CKAN plugins
111 p.load_all()
112
113 # Check Redis availability
114 if not is_redis_available():
115 log.critical('Could not connect to Redis.')
116
117 app_globals.reset()
118
119 # issue #3260: remove idle transaction
120 # Session that was used for getting all config params nor committed,
121 # neither removed and we have idle connection as result
122 model.Session.commit()
123
124 # Build JavaScript translations. Must be done after plugins have
125 # been loaded.
126 build_js_translations()
127
128
129 # A mapping of config settings that can be overridden by env vars.
130 # Note: Do not remove the following lines, they are used in the docs
131 # Start CONFIG_FROM_ENV_VARS
132 CONFIG_FROM_ENV_VARS = {
133 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',
134 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',
135 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',
136 'ckan.redis.url': 'CKAN_REDIS_URL',
137 'solr_url': 'CKAN_SOLR_URL',
138 'ckan.site_id': 'CKAN_SITE_ID',
139 'ckan.site_url': 'CKAN_SITE_URL',
140 'ckan.storage_path': 'CKAN_STORAGE_PATH',
141 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',
142 'smtp.server': 'CKAN_SMTP_SERVER',
143 'smtp.starttls': 'CKAN_SMTP_STARTTLS',
144 'smtp.user': 'CKAN_SMTP_USER',
145 'smtp.password': 'CKAN_SMTP_PASSWORD',
146 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM'
147 }
148 # End CONFIG_FROM_ENV_VARS
149
150
151 def update_config():
152 ''' This code needs to be run when the config is changed to take those
153 changes into account. It is called whenever a plugin is loaded as the
154 plugin might have changed the config values (for instance it might
155 change ckan.site_url) '''
156
157 for plugin in p.PluginImplementations(p.IConfigurer):
158 # must do update in place as this does not work:
159 # config = plugin.update_config(config)
160 plugin.update_config(config)
161
162 # Set whitelisted env vars on config object
163 # This is set up before globals are initialized
164
165 ckan_db = os.environ.get('CKAN_DB', None)
166 if ckan_db:
167 msg = 'Setting CKAN_DB as an env var is deprecated and will be' \
168 ' removed in a future release. Use CKAN_SQLALCHEMY_URL instead.'
169 log.warn(msg)
170 config['sqlalchemy.url'] = ckan_db
171
172 for option in CONFIG_FROM_ENV_VARS:
173 from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)
174 if from_env:
175 config[option] = from_env
176
177 root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
178
179 site_url = config.get('ckan.site_url', '')
180 if not site_url:
181 raise RuntimeError(
182 'ckan.site_url is not configured and it must have a value.'
183 ' Please amend your .ini file.')
184 if not site_url.lower().startswith('http'):
185 raise RuntimeError(
186 'ckan.site_url should be a full URL, including the schema '
187 '(http or https)')
188
189 display_timezone = config.get('ckan.display_timezone', '')
190 if (display_timezone and
191 display_timezone != 'server' and
192 display_timezone not in pytz.all_timezones):
193 raise CkanConfigurationException(
194 "ckan.display_timezone is not 'server' or a valid timezone"
195 )
196
197 # Remove backslash from site_url if present
198 config['ckan.site_url'] = config['ckan.site_url'].rstrip('/')
199
200 ckan_host = config['ckan.host'] = urlparse(site_url).netloc
201 if config.get('ckan.site_id') is None:
202 if ':' in ckan_host:
203 ckan_host, port = ckan_host.split(':')
204 assert ckan_host, 'You need to configure ckan.site_url or ' \
205 'ckan.site_id for SOLR search-index rebuild to work.'
206 config['ckan.site_id'] = ckan_host
207
208 # ensure that a favicon has been set
209 favicon = config.get('ckan.favicon', '/base/images/ckan.ico')
210 config['ckan.favicon'] = favicon
211
212 # Init SOLR settings and check if the schema is compatible
213 # from ckan.lib.search import SolrSettings, check_solr_schema_version
214
215 # lib.search is imported here as we need the config enabled and parsed
216 search.SolrSettings.init(config.get('solr_url'),
217 config.get('solr_user'),
218 config.get('solr_password'))
219 search.check_solr_schema_version()
220
221 routes_map = routing.make_map()
222 config['routes.map'] = routes_map
223 # The RoutesMiddleware needs its mapper updating if it exists
224 if 'routes.middleware' in config:
225 config['routes.middleware'].mapper = routes_map
226 # routes.named_routes is a CKAN thing
227 config['routes.named_routes'] = routing.named_routes
228 config['pylons.app_globals'] = app_globals.app_globals
229 # initialise the globals
230 app_globals.app_globals._init()
231
232 helpers.load_plugin_helpers()
233 config['pylons.h'] = helpers.helper_functions
234
235 # Templates and CSS loading from configuration
236 valid_base_templates_folder_names = ['templates', 'templates-bs2']
237 templates = config.get('ckan.base_templates_folder', 'templates')
238 config['ckan.base_templates_folder'] = templates
239
240 if templates not in valid_base_templates_folder_names:
241 raise CkanConfigurationException(
242 'You provided an invalid value for ckan.base_templates_folder. '
243 'Possible values are: "templates" and "templates-bs2".'
244 )
245
246 jinja2_templates_path = os.path.join(root, templates)
247 log.info('Loading templates from %s' % jinja2_templates_path)
248 template_paths = [jinja2_templates_path]
249
250 extra_template_paths = config.get('extra_template_paths', '')
251 if extra_template_paths:
252 # must be first for them to override defaults
253 template_paths = extra_template_paths.split(',') + template_paths
254 config['computed_template_paths'] = template_paths
255
256 # Set the default language for validation messages from formencode
257 # to what is set as the default locale in the config
258 default_lang = config.get('ckan.locale_default', 'en')
259 formencode.api.set_stdtranslation(domain="FormEncode",
260 languages=[default_lang])
261
262 # Markdown ignores the logger config, so to get rid of excessive
263 # markdown debug messages in the log, set it to the level of the
264 # root logger.
265 logging.getLogger("MARKDOWN").setLevel(logging.getLogger().level)
266
267 # Create Jinja2 environment
268 env = jinja_extensions.Environment(
269 **jinja_extensions.get_jinja_env_options())
270 env.install_gettext_callables(_, ungettext, newstyle=True)
271 # custom filters
272 env.filters['empty_and_escape'] = jinja_extensions.empty_and_escape
273 config['pylons.app_globals'].jinja_env = env
274
275 # CONFIGURATION OPTIONS HERE (note: all config options will override
276 # any Pylons config options)
277
278 # Initialize SQLAlchemy
279 engine = sqlalchemy.engine_from_config(config, client_encoding='utf8')
280 model.init_model(engine)
281
282 for plugin in p.PluginImplementations(p.IConfigurable):
283 plugin.configure(config)
284
285 # reset the template cache - we do this here so that when we load the
286 # environment it is clean
287 render.reset_template_info_cache()
288
289 # clear other caches
290 logic.clear_actions_cache()
291 logic.clear_validators_cache()
292 authz.clear_auth_functions_cache()
293
294 # Here we create the site user if they are not already in the database
295 try:
296 logic.get_action('get_site_user')({'ignore_auth': True}, None)
297 except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):
298 # (ProgrammingError for Postgres, OperationalError for SQLite)
299 # The database is not initialised. This is a bit dirty. This occurs
300 # when running tests.
301 pass
302 except sqlalchemy.exc.InternalError:
303 # The database is not initialised. Travis hits this
304 pass
305
306 # Close current session and open database connections to ensure a clean
307 # clean environment even if an error occurs later on
308 model.Session.remove()
309 model.Session.bind.dispose()
310
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ckan/config/environment.py b/ckan/config/environment.py
--- a/ckan/config/environment.py
+++ b/ckan/config/environment.py
@@ -135,6 +135,8 @@
'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',
'ckan.redis.url': 'CKAN_REDIS_URL',
'solr_url': 'CKAN_SOLR_URL',
+ 'solr_user': 'CKAN_SOLR_USER',
+ 'solr_password': 'CKAN_SOLR_PASSWORD',
'ckan.site_id': 'CKAN_SITE_ID',
'ckan.site_url': 'CKAN_SITE_URL',
'ckan.storage_path': 'CKAN_STORAGE_PATH',
|
{"golden_diff": "diff --git a/ckan/config/environment.py b/ckan/config/environment.py\n--- a/ckan/config/environment.py\n+++ b/ckan/config/environment.py\n@@ -135,6 +135,8 @@\n 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',\n 'ckan.redis.url': 'CKAN_REDIS_URL',\n 'solr_url': 'CKAN_SOLR_URL',\n+ 'solr_user': 'CKAN_SOLR_USER',\n+ 'solr_password': 'CKAN_SOLR_PASSWORD',\n 'ckan.site_id': 'CKAN_SITE_ID',\n 'ckan.site_url': 'CKAN_SITE_URL',\n 'ckan.storage_path': 'CKAN_STORAGE_PATH',\n", "issue": "Solr credential ENV vars not passed alongside CKAN_SOLR_URL\n### CKAN Version if known (or site URL)\r\n\r\n2.7.2\r\n\r\n### Please describe the expected behaviour\r\n\r\nSetting the following environment variables enables basic authentication for a Solr instance:\r\n\r\n```\r\nCKAN_SOLR_URL\r\nCKAN_SOLR_USER\r\nCKAN_SOLR_PASSWORD\r\n```\r\n\r\n### Please describe the actual behaviour\r\n\r\n`CKAN_SOLR_URL` is honoured, but `CKAN_SOLR_USER` and `CKAN_SOLR_PASSWORD` are ignored and must be set in `ckan.ini`.\r\n\r\n### What steps can be taken to reproduce the issue? \r\n\r\n1. Set the following environment variables:\r\n\r\n```\r\nCKAN_SOLR_USER=my_missing_user\r\nCKAN_SOLR_PASSWORD=my_password\r\n```\r\n\r\n2. Restart CKAN\r\n3. Observe that nothing has changed\r\n\r\nAlternatively, create a Solr instance with Basic authentication, set the correct credentials as environment variables, then restart CKAN and watch the Solr connection fail.\n", "before_files": [{"content": "# encoding: utf-8\n\n'''CKAN environment configuration'''\nimport os\nimport logging\nimport warnings\nfrom urlparse import urlparse\nimport pytz\n\nimport sqlalchemy\nfrom pylons import config as pylons_config\nimport formencode\n\nimport ckan.config.routing as routing\nimport ckan.model as model\nimport ckan.plugins as p\nimport ckan.lib.helpers as helpers\nimport ckan.lib.app_globals as app_globals\nfrom ckan.lib.redis import is_redis_available\nimport ckan.lib.render as render\nimport ckan.lib.search as search\nimport ckan.logic as logic\nimport ckan.authz as authz\nimport ckan.lib.jinja_extensions as jinja_extensions\nfrom ckan.lib.i18n import build_js_translations\n\nfrom ckan.common import _, ungettext, config\nfrom ckan.exceptions import CkanConfigurationException\n\nlog = logging.getLogger(__name__)\n\n\n# Suppress benign warning 'Unbuilt egg for setuptools'\nwarnings.simplefilter('ignore', UserWarning)\n\n\ndef load_environment(global_conf, app_conf):\n \"\"\"\n Configure the Pylons environment via the ``pylons.config`` object. This\n code should only need to be run once.\n \"\"\"\n # this must be run at a time when the env is semi-setup, thus inlined here.\n # Required by the deliverance plugin and iATI\n from pylons.wsgiapp import PylonsApp\n import pkg_resources\n find_controller_generic = PylonsApp.find_controller\n\n # This is from pylons 1.0 source, will monkey-patch into 0.9.7\n def find_controller(self, controller):\n if controller in self.controller_classes:\n return self.controller_classes[controller]\n # Check to see if its a dotted name\n if '.' 
in controller or ':' in controller:\n ep = pkg_resources.EntryPoint.parse('x={0}'.format(controller))\n\n if hasattr(ep, 'resolve'):\n # setuptools >= 10.2\n mycontroller = ep.resolve()\n else:\n # setuptools >= 11.3\n mycontroller = ep.load(False)\n\n self.controller_classes[controller] = mycontroller\n return mycontroller\n return find_controller_generic(self, controller)\n PylonsApp.find_controller = find_controller\n\n os.environ['CKAN_CONFIG'] = global_conf['__file__']\n\n # Pylons paths\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n valid_base_public_folder_names = ['public', 'public-bs2']\n static_files = app_conf.get('ckan.base_public_folder', 'public')\n app_conf['ckan.base_public_folder'] = static_files\n\n if static_files not in valid_base_public_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_public_folder. '\n 'Possible values are: \"public\" and \"public-bs2\".'\n )\n\n log.info('Loading static files from %s' % static_files)\n paths = dict(root=root,\n controllers=os.path.join(root, 'controllers'),\n static_files=os.path.join(root, static_files),\n templates=[])\n\n # Initialize main CKAN config object\n config.update(global_conf)\n config.update(app_conf)\n\n # Initialize Pylons own config object\n pylons_config.init_app(global_conf, app_conf, package='ckan', paths=paths)\n\n # Update the main CKAN config object with the Pylons specific stuff, as it\n # quite hard to keep them separated. This should be removed once Pylons\n # support is dropped\n config.update(pylons_config)\n\n # Setup the SQLAlchemy database engine\n # Suppress a couple of sqlalchemy warnings\n msgs = ['^Unicode type received non-unicode bind param value',\n \"^Did not recognize type 'BIGINT' of column 'size'\",\n \"^Did not recognize type 'tsvector' of column 'search_vector'\"\n ]\n for msg in msgs:\n warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)\n\n # load all CKAN plugins\n p.load_all()\n\n # Check Redis availability\n if not is_redis_available():\n log.critical('Could not connect to Redis.')\n\n app_globals.reset()\n\n # issue #3260: remove idle transaction\n # Session that was used for getting all config params nor committed,\n # neither removed and we have idle connection as result\n model.Session.commit()\n\n # Build JavaScript translations. Must be done after plugins have\n # been loaded.\n build_js_translations()\n\n\n# A mapping of config settings that can be overridden by env vars.\n# Note: Do not remove the following lines, they are used in the docs\n# Start CONFIG_FROM_ENV_VARS\nCONFIG_FROM_ENV_VARS = {\n 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',\n 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',\n 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',\n 'ckan.redis.url': 'CKAN_REDIS_URL',\n 'solr_url': 'CKAN_SOLR_URL',\n 'ckan.site_id': 'CKAN_SITE_ID',\n 'ckan.site_url': 'CKAN_SITE_URL',\n 'ckan.storage_path': 'CKAN_STORAGE_PATH',\n 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',\n 'smtp.server': 'CKAN_SMTP_SERVER',\n 'smtp.starttls': 'CKAN_SMTP_STARTTLS',\n 'smtp.user': 'CKAN_SMTP_USER',\n 'smtp.password': 'CKAN_SMTP_PASSWORD',\n 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM'\n}\n# End CONFIG_FROM_ENV_VARS\n\n\ndef update_config():\n ''' This code needs to be run when the config is changed to take those\n changes into account. 
It is called whenever a plugin is loaded as the\n plugin might have changed the config values (for instance it might\n change ckan.site_url) '''\n\n for plugin in p.PluginImplementations(p.IConfigurer):\n # must do update in place as this does not work:\n # config = plugin.update_config(config)\n plugin.update_config(config)\n\n # Set whitelisted env vars on config object\n # This is set up before globals are initialized\n\n ckan_db = os.environ.get('CKAN_DB', None)\n if ckan_db:\n msg = 'Setting CKAN_DB as an env var is deprecated and will be' \\\n ' removed in a future release. Use CKAN_SQLALCHEMY_URL instead.'\n log.warn(msg)\n config['sqlalchemy.url'] = ckan_db\n\n for option in CONFIG_FROM_ENV_VARS:\n from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)\n if from_env:\n config[option] = from_env\n\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n site_url = config.get('ckan.site_url', '')\n if not site_url:\n raise RuntimeError(\n 'ckan.site_url is not configured and it must have a value.'\n ' Please amend your .ini file.')\n if not site_url.lower().startswith('http'):\n raise RuntimeError(\n 'ckan.site_url should be a full URL, including the schema '\n '(http or https)')\n\n display_timezone = config.get('ckan.display_timezone', '')\n if (display_timezone and\n display_timezone != 'server' and\n display_timezone not in pytz.all_timezones):\n raise CkanConfigurationException(\n \"ckan.display_timezone is not 'server' or a valid timezone\"\n )\n\n # Remove backslash from site_url if present\n config['ckan.site_url'] = config['ckan.site_url'].rstrip('/')\n\n ckan_host = config['ckan.host'] = urlparse(site_url).netloc\n if config.get('ckan.site_id') is None:\n if ':' in ckan_host:\n ckan_host, port = ckan_host.split(':')\n assert ckan_host, 'You need to configure ckan.site_url or ' \\\n 'ckan.site_id for SOLR search-index rebuild to work.'\n config['ckan.site_id'] = ckan_host\n\n # ensure that a favicon has been set\n favicon = config.get('ckan.favicon', '/base/images/ckan.ico')\n config['ckan.favicon'] = favicon\n\n # Init SOLR settings and check if the schema is compatible\n # from ckan.lib.search import SolrSettings, check_solr_schema_version\n\n # lib.search is imported here as we need the config enabled and parsed\n search.SolrSettings.init(config.get('solr_url'),\n config.get('solr_user'),\n config.get('solr_password'))\n search.check_solr_schema_version()\n\n routes_map = routing.make_map()\n config['routes.map'] = routes_map\n # The RoutesMiddleware needs its mapper updating if it exists\n if 'routes.middleware' in config:\n config['routes.middleware'].mapper = routes_map\n # routes.named_routes is a CKAN thing\n config['routes.named_routes'] = routing.named_routes\n config['pylons.app_globals'] = app_globals.app_globals\n # initialise the globals\n app_globals.app_globals._init()\n\n helpers.load_plugin_helpers()\n config['pylons.h'] = helpers.helper_functions\n\n # Templates and CSS loading from configuration\n valid_base_templates_folder_names = ['templates', 'templates-bs2']\n templates = config.get('ckan.base_templates_folder', 'templates')\n config['ckan.base_templates_folder'] = templates\n\n if templates not in valid_base_templates_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_templates_folder. 
'\n 'Possible values are: \"templates\" and \"templates-bs2\".'\n )\n\n jinja2_templates_path = os.path.join(root, templates)\n log.info('Loading templates from %s' % jinja2_templates_path)\n template_paths = [jinja2_templates_path]\n\n extra_template_paths = config.get('extra_template_paths', '')\n if extra_template_paths:\n # must be first for them to override defaults\n template_paths = extra_template_paths.split(',') + template_paths\n config['computed_template_paths'] = template_paths\n\n # Set the default language for validation messages from formencode\n # to what is set as the default locale in the config\n default_lang = config.get('ckan.locale_default', 'en')\n formencode.api.set_stdtranslation(domain=\"FormEncode\",\n languages=[default_lang])\n\n # Markdown ignores the logger config, so to get rid of excessive\n # markdown debug messages in the log, set it to the level of the\n # root logger.\n logging.getLogger(\"MARKDOWN\").setLevel(logging.getLogger().level)\n\n # Create Jinja2 environment\n env = jinja_extensions.Environment(\n **jinja_extensions.get_jinja_env_options())\n env.install_gettext_callables(_, ungettext, newstyle=True)\n # custom filters\n env.filters['empty_and_escape'] = jinja_extensions.empty_and_escape\n config['pylons.app_globals'].jinja_env = env\n\n # CONFIGURATION OPTIONS HERE (note: all config options will override\n # any Pylons config options)\n\n # Initialize SQLAlchemy\n engine = sqlalchemy.engine_from_config(config, client_encoding='utf8')\n model.init_model(engine)\n\n for plugin in p.PluginImplementations(p.IConfigurable):\n plugin.configure(config)\n\n # reset the template cache - we do this here so that when we load the\n # environment it is clean\n render.reset_template_info_cache()\n\n # clear other caches\n logic.clear_actions_cache()\n logic.clear_validators_cache()\n authz.clear_auth_functions_cache()\n\n # Here we create the site user if they are not already in the database\n try:\n logic.get_action('get_site_user')({'ignore_auth': True}, None)\n except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):\n # (ProgrammingError for Postgres, OperationalError for SQLite)\n # The database is not initialised. This is a bit dirty. This occurs\n # when running tests.\n pass\n except sqlalchemy.exc.InternalError:\n # The database is not initialised. 
Travis hits this\n pass\n\n # Close current session and open database connections to ensure a clean\n # clean environment even if an error occurs later on\n model.Session.remove()\n model.Session.bind.dispose()\n", "path": "ckan/config/environment.py"}], "after_files": [{"content": "# encoding: utf-8\n\n'''CKAN environment configuration'''\nimport os\nimport logging\nimport warnings\nfrom urlparse import urlparse\nimport pytz\n\nimport sqlalchemy\nfrom pylons import config as pylons_config\nimport formencode\n\nimport ckan.config.routing as routing\nimport ckan.model as model\nimport ckan.plugins as p\nimport ckan.lib.helpers as helpers\nimport ckan.lib.app_globals as app_globals\nfrom ckan.lib.redis import is_redis_available\nimport ckan.lib.render as render\nimport ckan.lib.search as search\nimport ckan.logic as logic\nimport ckan.authz as authz\nimport ckan.lib.jinja_extensions as jinja_extensions\nfrom ckan.lib.i18n import build_js_translations\n\nfrom ckan.common import _, ungettext, config\nfrom ckan.exceptions import CkanConfigurationException\n\nlog = logging.getLogger(__name__)\n\n\n# Suppress benign warning 'Unbuilt egg for setuptools'\nwarnings.simplefilter('ignore', UserWarning)\n\n\ndef load_environment(global_conf, app_conf):\n \"\"\"\n Configure the Pylons environment via the ``pylons.config`` object. This\n code should only need to be run once.\n \"\"\"\n # this must be run at a time when the env is semi-setup, thus inlined here.\n # Required by the deliverance plugin and iATI\n from pylons.wsgiapp import PylonsApp\n import pkg_resources\n find_controller_generic = PylonsApp.find_controller\n\n # This is from pylons 1.0 source, will monkey-patch into 0.9.7\n def find_controller(self, controller):\n if controller in self.controller_classes:\n return self.controller_classes[controller]\n # Check to see if its a dotted name\n if '.' in controller or ':' in controller:\n ep = pkg_resources.EntryPoint.parse('x={0}'.format(controller))\n\n if hasattr(ep, 'resolve'):\n # setuptools >= 10.2\n mycontroller = ep.resolve()\n else:\n # setuptools >= 11.3\n mycontroller = ep.load(False)\n\n self.controller_classes[controller] = mycontroller\n return mycontroller\n return find_controller_generic(self, controller)\n PylonsApp.find_controller = find_controller\n\n os.environ['CKAN_CONFIG'] = global_conf['__file__']\n\n # Pylons paths\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n valid_base_public_folder_names = ['public', 'public-bs2']\n static_files = app_conf.get('ckan.base_public_folder', 'public')\n app_conf['ckan.base_public_folder'] = static_files\n\n if static_files not in valid_base_public_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_public_folder. '\n 'Possible values are: \"public\" and \"public-bs2\".'\n )\n\n log.info('Loading static files from %s' % static_files)\n paths = dict(root=root,\n controllers=os.path.join(root, 'controllers'),\n static_files=os.path.join(root, static_files),\n templates=[])\n\n # Initialize main CKAN config object\n config.update(global_conf)\n config.update(app_conf)\n\n # Initialize Pylons own config object\n pylons_config.init_app(global_conf, app_conf, package='ckan', paths=paths)\n\n # Update the main CKAN config object with the Pylons specific stuff, as it\n # quite hard to keep them separated. 
This should be removed once Pylons\n # support is dropped\n config.update(pylons_config)\n\n # Setup the SQLAlchemy database engine\n # Suppress a couple of sqlalchemy warnings\n msgs = ['^Unicode type received non-unicode bind param value',\n \"^Did not recognize type 'BIGINT' of column 'size'\",\n \"^Did not recognize type 'tsvector' of column 'search_vector'\"\n ]\n for msg in msgs:\n warnings.filterwarnings('ignore', msg, sqlalchemy.exc.SAWarning)\n\n # load all CKAN plugins\n p.load_all()\n\n # Check Redis availability\n if not is_redis_available():\n log.critical('Could not connect to Redis.')\n\n app_globals.reset()\n\n # issue #3260: remove idle transaction\n # Session that was used for getting all config params nor committed,\n # neither removed and we have idle connection as result\n model.Session.commit()\n\n # Build JavaScript translations. Must be done after plugins have\n # been loaded.\n build_js_translations()\n\n\n# A mapping of config settings that can be overridden by env vars.\n# Note: Do not remove the following lines, they are used in the docs\n# Start CONFIG_FROM_ENV_VARS\nCONFIG_FROM_ENV_VARS = {\n 'sqlalchemy.url': 'CKAN_SQLALCHEMY_URL',\n 'ckan.datastore.write_url': 'CKAN_DATASTORE_WRITE_URL',\n 'ckan.datastore.read_url': 'CKAN_DATASTORE_READ_URL',\n 'ckan.redis.url': 'CKAN_REDIS_URL',\n 'solr_url': 'CKAN_SOLR_URL',\n 'solr_user': 'CKAN_SOLR_USER',\n 'solr_password': 'CKAN_SOLR_PASSWORD',\n 'ckan.site_id': 'CKAN_SITE_ID',\n 'ckan.site_url': 'CKAN_SITE_URL',\n 'ckan.storage_path': 'CKAN_STORAGE_PATH',\n 'ckan.datapusher.url': 'CKAN_DATAPUSHER_URL',\n 'smtp.server': 'CKAN_SMTP_SERVER',\n 'smtp.starttls': 'CKAN_SMTP_STARTTLS',\n 'smtp.user': 'CKAN_SMTP_USER',\n 'smtp.password': 'CKAN_SMTP_PASSWORD',\n 'smtp.mail_from': 'CKAN_SMTP_MAIL_FROM'\n}\n# End CONFIG_FROM_ENV_VARS\n\n\ndef update_config():\n ''' This code needs to be run when the config is changed to take those\n changes into account. It is called whenever a plugin is loaded as the\n plugin might have changed the config values (for instance it might\n change ckan.site_url) '''\n\n for plugin in p.PluginImplementations(p.IConfigurer):\n # must do update in place as this does not work:\n # config = plugin.update_config(config)\n plugin.update_config(config)\n\n # Set whitelisted env vars on config object\n # This is set up before globals are initialized\n\n ckan_db = os.environ.get('CKAN_DB', None)\n if ckan_db:\n msg = 'Setting CKAN_DB as an env var is deprecated and will be' \\\n ' removed in a future release. 
Use CKAN_SQLALCHEMY_URL instead.'\n log.warn(msg)\n config['sqlalchemy.url'] = ckan_db\n\n for option in CONFIG_FROM_ENV_VARS:\n from_env = os.environ.get(CONFIG_FROM_ENV_VARS[option], None)\n if from_env:\n config[option] = from_env\n\n root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n site_url = config.get('ckan.site_url', '')\n if not site_url:\n raise RuntimeError(\n 'ckan.site_url is not configured and it must have a value.'\n ' Please amend your .ini file.')\n if not site_url.lower().startswith('http'):\n raise RuntimeError(\n 'ckan.site_url should be a full URL, including the schema '\n '(http or https)')\n\n display_timezone = config.get('ckan.display_timezone', '')\n if (display_timezone and\n display_timezone != 'server' and\n display_timezone not in pytz.all_timezones):\n raise CkanConfigurationException(\n \"ckan.display_timezone is not 'server' or a valid timezone\"\n )\n\n # Remove backslash from site_url if present\n config['ckan.site_url'] = config['ckan.site_url'].rstrip('/')\n\n ckan_host = config['ckan.host'] = urlparse(site_url).netloc\n if config.get('ckan.site_id') is None:\n if ':' in ckan_host:\n ckan_host, port = ckan_host.split(':')\n assert ckan_host, 'You need to configure ckan.site_url or ' \\\n 'ckan.site_id for SOLR search-index rebuild to work.'\n config['ckan.site_id'] = ckan_host\n\n # ensure that a favicon has been set\n favicon = config.get('ckan.favicon', '/base/images/ckan.ico')\n config['ckan.favicon'] = favicon\n\n # Init SOLR settings and check if the schema is compatible\n # from ckan.lib.search import SolrSettings, check_solr_schema_version\n\n # lib.search is imported here as we need the config enabled and parsed\n search.SolrSettings.init(config.get('solr_url'),\n config.get('solr_user'),\n config.get('solr_password'))\n search.check_solr_schema_version()\n\n routes_map = routing.make_map()\n config['routes.map'] = routes_map\n # The RoutesMiddleware needs its mapper updating if it exists\n if 'routes.middleware' in config:\n config['routes.middleware'].mapper = routes_map\n # routes.named_routes is a CKAN thing\n config['routes.named_routes'] = routing.named_routes\n config['pylons.app_globals'] = app_globals.app_globals\n # initialise the globals\n app_globals.app_globals._init()\n\n helpers.load_plugin_helpers()\n config['pylons.h'] = helpers.helper_functions\n\n # Templates and CSS loading from configuration\n valid_base_templates_folder_names = ['templates', 'templates-bs2']\n templates = config.get('ckan.base_templates_folder', 'templates')\n config['ckan.base_templates_folder'] = templates\n\n if templates not in valid_base_templates_folder_names:\n raise CkanConfigurationException(\n 'You provided an invalid value for ckan.base_templates_folder. 
'\n 'Possible values are: \"templates\" and \"templates-bs2\".'\n )\n\n jinja2_templates_path = os.path.join(root, templates)\n log.info('Loading templates from %s' % jinja2_templates_path)\n template_paths = [jinja2_templates_path]\n\n extra_template_paths = config.get('extra_template_paths', '')\n if extra_template_paths:\n # must be first for them to override defaults\n template_paths = extra_template_paths.split(',') + template_paths\n config['computed_template_paths'] = template_paths\n\n # Set the default language for validation messages from formencode\n # to what is set as the default locale in the config\n default_lang = config.get('ckan.locale_default', 'en')\n formencode.api.set_stdtranslation(domain=\"FormEncode\",\n languages=[default_lang])\n\n # Markdown ignores the logger config, so to get rid of excessive\n # markdown debug messages in the log, set it to the level of the\n # root logger.\n logging.getLogger(\"MARKDOWN\").setLevel(logging.getLogger().level)\n\n # Create Jinja2 environment\n env = jinja_extensions.Environment(\n **jinja_extensions.get_jinja_env_options())\n env.install_gettext_callables(_, ungettext, newstyle=True)\n # custom filters\n env.filters['empty_and_escape'] = jinja_extensions.empty_and_escape\n config['pylons.app_globals'].jinja_env = env\n\n # CONFIGURATION OPTIONS HERE (note: all config options will override\n # any Pylons config options)\n\n # Initialize SQLAlchemy\n engine = sqlalchemy.engine_from_config(config, client_encoding='utf8')\n model.init_model(engine)\n\n for plugin in p.PluginImplementations(p.IConfigurable):\n plugin.configure(config)\n\n # reset the template cache - we do this here so that when we load the\n # environment it is clean\n render.reset_template_info_cache()\n\n # clear other caches\n logic.clear_actions_cache()\n logic.clear_validators_cache()\n authz.clear_auth_functions_cache()\n\n # Here we create the site user if they are not already in the database\n try:\n logic.get_action('get_site_user')({'ignore_auth': True}, None)\n except (sqlalchemy.exc.ProgrammingError, sqlalchemy.exc.OperationalError):\n # (ProgrammingError for Postgres, OperationalError for SQLite)\n # The database is not initialised. This is a bit dirty. This occurs\n # when running tests.\n pass\n except sqlalchemy.exc.InternalError:\n # The database is not initialised. Travis hits this\n pass\n\n # Close current session and open database connections to ensure a clean\n # clean environment even if an error occurs later on\n model.Session.remove()\n model.Session.bind.dispose()\n", "path": "ckan/config/environment.py"}]}
| 3,996 | 161 |
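The `ckan/config/environment.py` record above ends with the full before/after file contents, including the `CONFIG_FROM_ENV_VARS` whitelist and the loop in `update_config()` that copies those environment variables over the `.ini`-based settings. The following is a minimal, self-contained sketch of that override pattern; the plain dictionary standing in for CKAN's config object and the example values are assumptions made for illustration, not part of the record.

```python
import os

# Subset of the whitelist shown in environment.py: config key -> env var name.
CONFIG_FROM_ENV_VARS = {
    "sqlalchemy.url": "CKAN_SQLALCHEMY_URL",
    "ckan.site_url": "CKAN_SITE_URL",
    "solr_url": "CKAN_SOLR_URL",
}

# A plain dict stands in for CKAN's config object in this sketch.
config = {
    "sqlalchemy.url": "postgresql://ckan:pass@localhost/ckan_default",
    "ckan.site_url": "http://localhost:5000",
    "solr_url": "http://127.0.0.1:8983/solr/ckan",
}


def apply_env_overrides(config, mapping=CONFIG_FROM_ENV_VARS):
    """Copy whitelisted environment variables over the file-based settings,
    mirroring the loop shown in update_config()."""
    for option, env_name in mapping.items():
        from_env = os.environ.get(env_name)
        if from_env:
            config[option] = from_env
    return config


if __name__ == "__main__":
    os.environ.setdefault("CKAN_SITE_URL", "https://demo.example.org")  # example value only
    apply_env_overrides(config)
    print(config["ckan.site_url"])  # -> https://demo.example.org
```

The precedence sketched here (environment variable over `.ini` value) is what the `Start CONFIG_FROM_ENV_VARS` block in the file documents; keys with no matching environment variable are left untouched.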
gh_patches_debug_18800
|
rasdani/github-patches
|
git_diff
|
DistrictDataLabs__yellowbrick-407
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
ClassificationScoreVisualizers should return accuracy
See #358 and #213 -- classification score visualizers should return accuracy when `score()` is called. If F1 or accuracy is not in the figure it should also be included in the figure.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `yellowbrick/classifier/base.py`
Content:
```
1 # yellowbrick.classifier.base
2 # API for classification visualizer hierarchy.
3 #
4 # Author: Rebecca Bilbro <[email protected]>
5 # Author: Benjamin Bengfort <[email protected]>
6 # Author: Neal Humphrey
7 # Created: Wed May 18 12:39:40 2016 -0400
8 #
9 # Copyright (C) 2016 District Data Labs
10 # For license information, see LICENSE.txt
11 #
12 # ID: base.py [5388065] [email protected] $
13
14 """
15 API for classification visualizer hierarchy.
16 """
17
18 ##########################################################################
19 ## Imports
20 ##########################################################################
21
22 import numpy as np
23
24 from ..utils import isclassifier
25 from ..base import ScoreVisualizer
26 from ..style.palettes import color_palette
27 from ..exceptions import YellowbrickTypeError
28
29
30 ##########################################################################
31 ## Base Classification Visualizer
32 ##########################################################################
33
34 class ClassificationScoreVisualizer(ScoreVisualizer):
35
36 def __init__(self, model, ax=None, classes=None, **kwargs):
37 """
38 Check to see if model is an instance of a classifer.
39 Should return an error if it isn't.
40
41 .. todo:: document this class.
42 .. tood:: accept as input classes as all visualizers need this.
43 """
44 # A bit of type checking
45 if not isclassifier(model):
46 raise YellowbrickTypeError(
47 "This estimator is not a classifier; "
48 "try a regression or clustering score visualizer instead!"
49 )
50
51 # Initialize the super method.
52 super(ClassificationScoreVisualizer, self).__init__(model, ax=ax, **kwargs)
53
54 # Convert to array if necessary to match estimator.classes_
55 if classes is not None:
56 classes = np.array(classes)
57
58 # Set up classifier score visualization properties
59 if classes is not None:
60 n_colors = len(classes)
61 else:
62 n_colors = None
63
64 self.colors = color_palette(kwargs.pop('colors', None), n_colors)
65 self.classes_ = classes
66
67 @property
68 def classes_(self):
69 """
70 Proxy property to smartly access the classes from the estimator or
71 stored locally on the score visualizer for visualization.
72 """
73 if self.__classes is None:
74 try:
75 return self.estimator.classes_
76 except AttributeError:
77 return None
78 return self.__classes
79
80 @classes_.setter
81 def classes_(self, value):
82 self.__classes = value
83
84 def fit(self, X, y=None, **kwargs):
85 """
86 Parameters
87 ----------
88
89 X : ndarray or DataFrame of shape n x m
90 A matrix of n instances with m features
91
92 y : ndarray or Series of length n
93 An array or series of target or class values
94
95 kwargs: keyword arguments passed to Scikit-Learn API.
96
97 Returns
98 -------
99 self : instance
100 Returns the instance of the classification score visualizer
101
102 """
103 # Fit the inner estimator
104 self.estimator.fit(X, y)
105
106 # Extract the classes from the estimator
107 if self.classes_ is None:
108 self.classes_ = self.estimator.classes_
109
110 # Always return self from fit
111 return self
112
113 #TODO during refactoring this can be used to generalize ClassBalance
114 def class_counts(self, y):
115 unique, counts = np.unique(y, return_counts=True)
116 return dict(zip(unique, counts))
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/yellowbrick/classifier/base.py b/yellowbrick/classifier/base.py
--- a/yellowbrick/classifier/base.py
+++ b/yellowbrick/classifier/base.py
@@ -110,6 +110,28 @@
# Always return self from fit
return self
+
+ def score(self, X, y, **kwargs):
+ """
+ The score function is the hook for visual interaction. Pass in test
+ data and the visualizer will create predictions on the data and
+ evaluate them with respect to the test values. The evaluation will
+ then be passed to draw() and the result of the estimator score will
+ be returned.
+ Parameters
+ ----------
+ X : array-like
+ X (also X_test) are the dependent variables of test set to predict
+ y : array-like
+ y (also y_test) is the independent actual variables to score against
+ Returns
+ -------
+ score : float
+ """
+ self.score_ = self.estimator.score(X, y, **kwargs)
+
+ return self.score_
+
#TODO during refactoring this can be used to generalize ClassBalance
def class_counts(self, y):
unique, counts = np.unique(y, return_counts=True)
|
{"golden_diff": "diff --git a/yellowbrick/classifier/base.py b/yellowbrick/classifier/base.py\n--- a/yellowbrick/classifier/base.py\n+++ b/yellowbrick/classifier/base.py\n@@ -110,6 +110,28 @@\n # Always return self from fit\n return self\n \n+\n+ def score(self, X, y, **kwargs):\n+ \"\"\"\n+ The score function is the hook for visual interaction. Pass in test\n+ data and the visualizer will create predictions on the data and\n+ evaluate them with respect to the test values. The evaluation will\n+ then be passed to draw() and the result of the estimator score will\n+ be returned.\n+ Parameters\n+ ----------\n+ X : array-like\n+ X (also X_test) are the dependent variables of test set to predict\n+ y : array-like\n+ y (also y_test) is the independent actual variables to score against\n+ Returns\n+ -------\n+ score : float\n+ \"\"\"\n+ self.score_ = self.estimator.score(X, y, **kwargs)\n+\n+ return self.score_\n+\n #TODO during refactoring this can be used to generalize ClassBalance\n def class_counts(self, y):\n unique, counts = np.unique(y, return_counts=True)\n", "issue": "ClassificationScoreVisualizers should return accuracy\nSee #358 and #213 -- classification score visualizers should return accuracy when `score()` is called. If F1 or accuracy is not in the figure it should also be included in the figure. \n", "before_files": [{"content": "# yellowbrick.classifier.base\n# API for classification visualizer hierarchy.\n#\n# Author: Rebecca Bilbro <[email protected]>\n# Author: Benjamin Bengfort <[email protected]>\n# Author: Neal Humphrey\n# Created: Wed May 18 12:39:40 2016 -0400\n#\n# Copyright (C) 2016 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: base.py [5388065] [email protected] $\n\n\"\"\"\nAPI for classification visualizer hierarchy.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport numpy as np\n\nfrom ..utils import isclassifier\nfrom ..base import ScoreVisualizer\nfrom ..style.palettes import color_palette\nfrom ..exceptions import YellowbrickTypeError\n\n\n##########################################################################\n## Base Classification Visualizer\n##########################################################################\n\nclass ClassificationScoreVisualizer(ScoreVisualizer):\n\n def __init__(self, model, ax=None, classes=None, **kwargs):\n \"\"\"\n Check to see if model is an instance of a classifer.\n Should return an error if it isn't.\n\n .. todo:: document this class.\n .. 
tood:: accept as input classes as all visualizers need this.\n \"\"\"\n # A bit of type checking\n if not isclassifier(model):\n raise YellowbrickTypeError(\n \"This estimator is not a classifier; \"\n \"try a regression or clustering score visualizer instead!\"\n )\n\n # Initialize the super method.\n super(ClassificationScoreVisualizer, self).__init__(model, ax=ax, **kwargs)\n\n # Convert to array if necessary to match estimator.classes_\n if classes is not None:\n classes = np.array(classes)\n\n # Set up classifier score visualization properties\n if classes is not None:\n n_colors = len(classes)\n else:\n n_colors = None\n\n self.colors = color_palette(kwargs.pop('colors', None), n_colors)\n self.classes_ = classes\n\n @property\n def classes_(self):\n \"\"\"\n Proxy property to smartly access the classes from the estimator or\n stored locally on the score visualizer for visualization.\n \"\"\"\n if self.__classes is None:\n try:\n return self.estimator.classes_\n except AttributeError:\n return None\n return self.__classes\n\n @classes_.setter\n def classes_(self, value):\n self.__classes = value\n\n def fit(self, X, y=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n\n X : ndarray or DataFrame of shape n x m\n A matrix of n instances with m features\n\n y : ndarray or Series of length n\n An array or series of target or class values\n\n kwargs: keyword arguments passed to Scikit-Learn API.\n\n Returns\n -------\n self : instance\n Returns the instance of the classification score visualizer\n\n \"\"\"\n # Fit the inner estimator\n self.estimator.fit(X, y)\n\n # Extract the classes from the estimator\n if self.classes_ is None:\n self.classes_ = self.estimator.classes_\n\n # Always return self from fit\n return self\n\n #TODO during refactoring this can be used to generalize ClassBalance\n def class_counts(self, y):\n unique, counts = np.unique(y, return_counts=True)\n return dict(zip(unique, counts))\n", "path": "yellowbrick/classifier/base.py"}], "after_files": [{"content": "# yellowbrick.classifier.base\n# API for classification visualizer hierarchy.\n#\n# Author: Rebecca Bilbro <[email protected]>\n# Author: Benjamin Bengfort <[email protected]>\n# Author: Neal Humphrey\n# Created: Wed May 18 12:39:40 2016 -0400\n#\n# Copyright (C) 2016 District Data Labs\n# For license information, see LICENSE.txt\n#\n# ID: base.py [5388065] [email protected] $\n\n\"\"\"\nAPI for classification visualizer hierarchy.\n\"\"\"\n\n##########################################################################\n## Imports\n##########################################################################\n\nimport numpy as np\n\nfrom ..utils import isclassifier\nfrom ..base import ScoreVisualizer\nfrom ..style.palettes import color_palette\nfrom ..exceptions import YellowbrickTypeError\n\n\n##########################################################################\n## Base Classification Visualizer\n##########################################################################\n\nclass ClassificationScoreVisualizer(ScoreVisualizer):\n\n def __init__(self, model, ax=None, classes=None, **kwargs):\n \"\"\"\n Check to see if model is an instance of a classifer.\n Should return an error if it isn't.\n\n .. todo:: document this class.\n .. 
tood:: accept as input classes as all visualizers need this.\n \"\"\"\n # A bit of type checking\n if not isclassifier(model):\n raise YellowbrickTypeError(\n \"This estimator is not a classifier; \"\n \"try a regression or clustering score visualizer instead!\"\n )\n\n # Initialize the super method.\n super(ClassificationScoreVisualizer, self).__init__(model, ax=ax, **kwargs)\n\n # Convert to array if necessary to match estimator.classes_\n if classes is not None:\n classes = np.array(classes)\n\n # Set up classifier score visualization properties\n if classes is not None:\n n_colors = len(classes)\n else:\n n_colors = None\n\n self.colors = color_palette(kwargs.pop('colors', None), n_colors)\n self.classes_ = classes\n\n @property\n def classes_(self):\n \"\"\"\n Proxy property to smartly access the classes from the estimator or\n stored locally on the score visualizer for visualization.\n \"\"\"\n if self.__classes is None:\n try:\n return self.estimator.classes_\n except AttributeError:\n return None\n return self.__classes\n\n @classes_.setter\n def classes_(self, value):\n self.__classes = value\n\n def fit(self, X, y=None, **kwargs):\n \"\"\"\n Parameters\n ----------\n\n X : ndarray or DataFrame of shape n x m\n A matrix of n instances with m features\n\n y : ndarray or Series of length n\n An array or series of target or class values\n\n kwargs: keyword arguments passed to Scikit-Learn API.\n\n Returns\n -------\n self : instance\n Returns the instance of the classification score visualizer\n\n \"\"\"\n # Fit the inner estimator\n self.estimator.fit(X, y)\n\n # Extract the classes from the estimator\n if self.classes_ is None:\n self.classes_ = self.estimator.classes_\n\n # Always return self from fit\n return self\n\n\n def score(self, X, y, **kwargs):\n \"\"\"\n The score function is the hook for visual interaction. Pass in test\n data and the visualizer will create predictions on the data and\n evaluate them with respect to the test values. The evaluation will\n then be passed to draw() and the result of the estimator score will\n be returned.\n Parameters\n ----------\n X : array-like\n X (also X_test) are the dependent variables of test set to predict\n y : array-like\n y (also y_test) is the independent actual variables to score against\n Returns\n -------\n score : float\n \"\"\"\n self.score_ = self.estimator.score(X, y, **kwargs)\n\n return self.score_\n\n #TODO during refactoring this can be used to generalize ClassBalance\n def class_counts(self, y):\n unique, counts = np.unique(y, return_counts=True)\n return dict(zip(unique, counts))\n", "path": "yellowbrick/classifier/base.py"}]}
| 1,303 | 286 |
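The golden diff in this record adds a `score()` hook to `ClassificationScoreVisualizer` that delegates to the wrapped estimator's `score()` (accuracy for scikit-learn classifiers) and caches the result on `self.score_`. Below is a minimal usage sketch of the patched base class as shown above; the synthetic data, the `LogisticRegression` estimator, and the class names are illustrative assumptions, and released yellowbrick versions may wrap this behaviour differently.

```python
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

# Module path taken from the record (yellowbrick/classifier/base.py).
from yellowbrick.classifier.base import ClassificationScoreVisualizer

X, y = make_classification(n_samples=200, n_features=10, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)

viz = ClassificationScoreVisualizer(LogisticRegression(), classes=["neg", "pos"])
viz.fit(X_train, y_train)

# With the patch applied, score() returns the estimator's default score
# (accuracy here) and also stores it on the visualizer as viz.score_.
accuracy = viz.score(X_test, y_test)
print(accuracy, viz.score_)
```

Per the issue text, the cached `score_` value is also meant to appear in the rendered figure; the patch itself only returns and stores it.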
gh_patches_debug_23280
|
rasdani/github-patches
|
git_diff
|
open-mmlab__mmaction2-723
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Error in --validate while training.
Hi,
My dataset is multiclass `VideoDataset` type and looks something like this.
```
some/path/000.mp4 1 3 5
some/path/001.mp4 1 2
some/path/002.mp4 2 6 10
```
Given video data and multi-class, I adopted my configs to test something on a small dataset from `tsn_r50_video_1x1x8_100e_kinetics400_rgb.py` and `tsn_r101_1x1x5_50e_mmit_rgb.py`
It looks something like this
```
# model settings
model = dict(
type='Recognizer2D',
backbone=dict(
type='ResNet',
pretrained='torchvision://resnet50',
depth=50,
norm_eval=False),
cls_head=dict(
type='TSNHead',
num_classes=14,
in_channels=2048,
spatial_type='avg',
consensus=dict(type='AvgConsensus', dim=1),
loss_cls=dict(type='BCELossWithLogits', loss_weight=160.0),
dropout_ratio=0.5,
init_std=0.01,
multi_class=True,
label_smooth_eps=0))
# model training and testing settings
train_cfg = None
test_cfg = dict(average_clips=None)
# dataset settings
dataset_type = 'VideoDataset'
data_root = '/home/rajawat/Desktop/videos'
data_root_val = '/home/rajawat/Desktop/val_videos'
ann_file_train = '/home/rajawat/Desktop/labels/train.txt'
ann_file_val = '/home/rajawat/Desktop/labels/val.txt'
ann_file_test = '/home/rajawat/Desktop/labels/test.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),
dict(type='DecordDecode'),
dict(type='RandomResizedCrop'),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='DecordInit'),
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=3,
test_mode=True),
dict(type='DecordDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='CenterCrop', crop_size=224),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
test_pipeline = [
dict(type='DecordInit'),
dict(
type='SampleFrames',
clip_len=1,
frame_interval=1,
num_clips=25,
test_mode=True),
dict(type='DecordDecode'),
dict(type='Resize', scale=(-1, 256)),
dict(type='TenCrop', crop_size=224),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs'])
]
data = dict(
videos_per_gpu=2,
workers_per_gpu=0,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_root,
pipeline=train_pipeline,
multi_class=True,
num_classes=14),
val=dict(
type=dataset_type,
ann_file=ann_file_val,
data_prefix=data_root_val,
pipeline=val_pipeline,
multi_class=True,
num_classes=14),
test=dict(
type=dataset_type,
ann_file=ann_file_test,
data_prefix=data_root_val,
pipeline=test_pipeline,
multi_class=True,
num_classes=14))
# optimizer
optimizer = dict(
type='SGD',
constructor='TSMOptimizerConstructor',
paramwise_cfg=dict(fc_lr5=True),
lr=0.01, # this lr is used for 8 gpus
momentum=0.9,
weight_decay=0.0001,
)
optimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[20, 40])
total_epochs = 10
checkpoint_config = dict(interval=5)
evaluation = dict(interval=2, metrics=['mean_average_precision'])
# yapf:disable
log_config = dict(
interval=2,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = './work_dirs/tsn_r101_1x1x5_50e_mmit_rgb/'
load_from = None
resume_from = None
workflow = [('train', 1)]
```
while training
`python tools/train.py configs/recognition/tsn/tsn_r50_1x1x5_50e_cater_rgb.py --gpus 1` works well , but with `--validate` it breaks down.
```
Traceback (most recent call last):
File "tools/train.py", line 178, in <module>
main()
File "tools/train.py", line 174, in main
meta=meta)
File "/home/rajawat/Desktop/mmaction/apis/train.py", line 156, in train_model
runner.run(data_loaders, cfg.workflow, cfg.total_epochs, **runner_kwargs)
File "/home/rajawat/anaconda3/envs/slowfast/lib/python3.7/site-packages/mmcv/runner/epoch_based_runner.py", line 125, in run
epoch_runner(data_loaders[i], **kwargs)
File "/home/rajawat/anaconda3/envs/slowfast/lib/python3.7/site-packages/mmcv/runner/epoch_based_runner.py", line 54, in train
self.call_hook('after_train_epoch')
File "/home/rajawat/anaconda3/envs/slowfast/lib/python3.7/site-packages/mmcv/runner/base_runner.py", line 308, in call_hook
getattr(hook, fn_name)(self)
File "/home/rajawat/Desktop/mmaction/core/evaluation/eval_hooks.py", line 152, in after_train_epoch
key_score = self.evaluate(runner, results)
File "/home/rajawat/Desktop/mmaction/core/evaluation/eval_hooks.py", line 170, in evaluate
results, logger=runner.logger, **self.eval_kwargs)
File "/home/rajawat/Desktop/mmaction/datasets/base.py", line 215, in evaluate
for label in gt_labels
File "/home/rajawat/Desktop/mmaction/datasets/base.py", line 215, in <listcomp>
for label in gt_labels
File "/home/rajawat/Desktop/mmaction/datasets/base.py", line 121, in label2array
arr[label] = 1.
IndexError: only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices
```
Am I missing something in the config?
Thanks in advance.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `mmaction/datasets/video_dataset.py`
Content:
```
1 import os.path as osp
2
3 import torch
4
5 from .base import BaseDataset
6 from .registry import DATASETS
7
8
9 @DATASETS.register_module()
10 class VideoDataset(BaseDataset):
11 """Video dataset for action recognition.
12
13 The dataset loads raw videos and apply specified transforms to return a
14 dict containing the frame tensors and other information.
15
16 The ann_file is a text file with multiple lines, and each line indicates
17 a sample video with the filepath and label, which are split with a
18 whitespace. Example of a annotation file:
19
20 .. code-block:: txt
21
22 some/path/000.mp4 1
23 some/path/001.mp4 1
24 some/path/002.mp4 2
25 some/path/003.mp4 2
26 some/path/004.mp4 3
27 some/path/005.mp4 3
28
29
30 Args:
31 ann_file (str): Path to the annotation file.
32 pipeline (list[dict | callable]): A sequence of data transforms.
33 start_index (int): Specify a start index for frames in consideration of
34 different filename format. However, when taking videos as input,
35 it should be set to 0, since frames loaded from videos count
36 from 0. Default: 0.
37 **kwargs: Keyword arguments for ``BaseDataset``.
38 """
39
40 def __init__(self, ann_file, pipeline, start_index=0, **kwargs):
41 super().__init__(ann_file, pipeline, start_index=start_index, **kwargs)
42
43 def load_annotations(self):
44 """Load annotation file to get video information."""
45 if self.ann_file.endswith('.json'):
46 return self.load_json_annotations()
47
48 video_infos = []
49 with open(self.ann_file, 'r') as fin:
50 for line in fin:
51 line_split = line.strip().split()
52 if self.multi_class:
53 assert self.num_classes is not None
54 filename, label = line_split[0], line_split[1:]
55 label = list(map(int, label))
56 onehot = torch.zeros(self.num_classes)
57 onehot[label] = 1.0
58 else:
59 filename, label = line_split
60 label = int(label)
61 if self.data_prefix is not None:
62 filename = osp.join(self.data_prefix, filename)
63 video_infos.append(
64 dict(
65 filename=filename,
66 label=onehot if self.multi_class else label))
67 return video_infos
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/mmaction/datasets/video_dataset.py b/mmaction/datasets/video_dataset.py
--- a/mmaction/datasets/video_dataset.py
+++ b/mmaction/datasets/video_dataset.py
@@ -1,7 +1,5 @@
import os.path as osp
-import torch
-
from .base import BaseDataset
from .registry import DATASETS
@@ -53,15 +51,10 @@
assert self.num_classes is not None
filename, label = line_split[0], line_split[1:]
label = list(map(int, label))
- onehot = torch.zeros(self.num_classes)
- onehot[label] = 1.0
else:
filename, label = line_split
label = int(label)
if self.data_prefix is not None:
filename = osp.join(self.data_prefix, filename)
- video_infos.append(
- dict(
- filename=filename,
- label=onehot if self.multi_class else label))
+ video_infos.append(dict(filename=filename, label=label))
return video_infos
|
{"golden_diff": "diff --git a/mmaction/datasets/video_dataset.py b/mmaction/datasets/video_dataset.py\n--- a/mmaction/datasets/video_dataset.py\n+++ b/mmaction/datasets/video_dataset.py\n@@ -1,7 +1,5 @@\n import os.path as osp\n \n-import torch\n-\n from .base import BaseDataset\n from .registry import DATASETS\n \n@@ -53,15 +51,10 @@\n assert self.num_classes is not None\n filename, label = line_split[0], line_split[1:]\n label = list(map(int, label))\n- onehot = torch.zeros(self.num_classes)\n- onehot[label] = 1.0\n else:\n filename, label = line_split\n label = int(label)\n if self.data_prefix is not None:\n filename = osp.join(self.data_prefix, filename)\n- video_infos.append(\n- dict(\n- filename=filename,\n- label=onehot if self.multi_class else label))\n+ video_infos.append(dict(filename=filename, label=label))\n return video_infos\n", "issue": "Error in --validate while training.\nHi,\r\n\r\nMy dataset is multiclass `VideoDataset` type and looks something like this.\r\n```\r\n\r\n some/path/000.mp4 1 3 5\r\n some/path/001.mp4 1 2\r\n some/path/002.mp4 2 6 10\r\n\r\n```\r\nGiven video data and multi-class, I adopted my configs to test something on a small dataset from `tsn_r50_video_1x1x8_100e_kinetics400_rgb.py` and `tsn_r101_1x1x5_50e_mmit_rgb.py` \r\n\r\nIt looks something like this\r\n```\r\n# model settings\r\nmodel = dict(\r\n type='Recognizer2D',\r\n backbone=dict(\r\n type='ResNet',\r\n pretrained='torchvision://resnet50',\r\n depth=50,\r\n norm_eval=False),\r\n cls_head=dict(\r\n type='TSNHead',\r\n num_classes=14,\r\n in_channels=2048,\r\n spatial_type='avg',\r\n consensus=dict(type='AvgConsensus', dim=1),\r\n loss_cls=dict(type='BCELossWithLogits', loss_weight=160.0),\r\n dropout_ratio=0.5,\r\n init_std=0.01,\r\n multi_class=True,\r\n label_smooth_eps=0))\r\n# model training and testing settings\r\ntrain_cfg = None\r\ntest_cfg = dict(average_clips=None)\r\n# dataset settings\r\ndataset_type = 'VideoDataset'\r\ndata_root = '/home/rajawat/Desktop/videos'\r\ndata_root_val = '/home/rajawat/Desktop/val_videos'\r\nann_file_train = '/home/rajawat/Desktop/labels/train.txt'\r\nann_file_val = '/home/rajawat/Desktop/labels/val.txt'\r\nann_file_test = '/home/rajawat/Desktop/labels/test.txt'\r\nimg_norm_cfg = dict(\r\n mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)\r\ntrain_pipeline = [\r\n dict(type='DecordInit'),\r\n dict(type='SampleFrames', clip_len=1, frame_interval=1, num_clips=3),\r\n dict(type='DecordDecode'),\r\n dict(type='RandomResizedCrop'),\r\n dict(type='Resize', scale=(224, 224), keep_ratio=False),\r\n dict(type='Flip', flip_ratio=0.5),\r\n dict(type='Normalize', **img_norm_cfg),\r\n dict(type='FormatShape', input_format='NCHW'),\r\n dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),\r\n dict(type='ToTensor', keys=['imgs', 'label'])\r\n]\r\nval_pipeline = [\r\n dict(type='DecordInit'),\r\n dict(\r\n type='SampleFrames',\r\n clip_len=1,\r\n frame_interval=1,\r\n num_clips=3,\r\n test_mode=True),\r\n dict(type='DecordDecode'),\r\n dict(type='Resize', scale=(-1, 256)),\r\n dict(type='CenterCrop', crop_size=224),\r\n dict(type='Flip', flip_ratio=0),\r\n dict(type='Normalize', **img_norm_cfg),\r\n dict(type='FormatShape', input_format='NCHW'),\r\n dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),\r\n dict(type='ToTensor', keys=['imgs'])\r\n]\r\ntest_pipeline = [\r\n dict(type='DecordInit'),\r\n dict(\r\n type='SampleFrames',\r\n clip_len=1,\r\n frame_interval=1,\r\n num_clips=25,\r\n test_mode=True),\r\n 
dict(type='DecordDecode'),\r\n dict(type='Resize', scale=(-1, 256)),\r\n dict(type='TenCrop', crop_size=224),\r\n dict(type='Flip', flip_ratio=0),\r\n dict(type='Normalize', **img_norm_cfg),\r\n dict(type='FormatShape', input_format='NCHW'),\r\n dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),\r\n dict(type='ToTensor', keys=['imgs'])\r\n]\r\n\r\ndata = dict(\r\n videos_per_gpu=2,\r\n workers_per_gpu=0,\r\n train=dict(\r\n type=dataset_type,\r\n ann_file=ann_file_train,\r\n data_prefix=data_root,\r\n pipeline=train_pipeline,\r\n multi_class=True,\r\n num_classes=14),\r\n val=dict(\r\n type=dataset_type,\r\n ann_file=ann_file_val,\r\n data_prefix=data_root_val,\r\n pipeline=val_pipeline,\r\n multi_class=True,\r\n num_classes=14),\r\n test=dict(\r\n type=dataset_type,\r\n ann_file=ann_file_test,\r\n data_prefix=data_root_val,\r\n pipeline=test_pipeline,\r\n multi_class=True,\r\n num_classes=14))\r\n# optimizer\r\noptimizer = dict(\r\n type='SGD',\r\n constructor='TSMOptimizerConstructor',\r\n paramwise_cfg=dict(fc_lr5=True),\r\n lr=0.01, # this lr is used for 8 gpus\r\n momentum=0.9,\r\n weight_decay=0.0001,\r\n)\r\noptimizer_config = dict(grad_clip=dict(max_norm=20, norm_type=2))\r\n# learning policy\r\nlr_config = dict(policy='step', step=[20, 40])\r\ntotal_epochs = 10\r\ncheckpoint_config = dict(interval=5)\r\nevaluation = dict(interval=2, metrics=['mean_average_precision'])\r\n# yapf:disable\r\nlog_config = dict(\r\n interval=2,\r\n hooks=[\r\n dict(type='TextLoggerHook'),\r\n # dict(type='TensorboardLoggerHook'),\r\n ])\r\n# runtime settings\r\ndist_params = dict(backend='nccl')\r\nlog_level = 'INFO'\r\nwork_dir = './work_dirs/tsn_r101_1x1x5_50e_mmit_rgb/'\r\nload_from = None\r\nresume_from = None\r\nworkflow = [('train', 1)]\r\n\r\n```\r\n\r\nwhile training \r\n`python tools/train.py configs/recognition/tsn/tsn_r50_1x1x5_50e_cater_rgb.py --gpus 1` works well , but with `--validate` it breaks down. 
\r\n\r\n```\r\nTraceback (most recent call last):\r\n File \"tools/train.py\", line 178, in <module>\r\n main()\r\n File \"tools/train.py\", line 174, in main\r\n meta=meta)\r\n File \"/home/rajawat/Desktop/mmaction/apis/train.py\", line 156, in train_model\r\n runner.run(data_loaders, cfg.workflow, cfg.total_epochs, **runner_kwargs)\r\n File \"/home/rajawat/anaconda3/envs/slowfast/lib/python3.7/site-packages/mmcv/runner/epoch_based_runner.py\", line 125, in run\r\n epoch_runner(data_loaders[i], **kwargs)\r\n File \"/home/rajawat/anaconda3/envs/slowfast/lib/python3.7/site-packages/mmcv/runner/epoch_based_runner.py\", line 54, in train\r\n self.call_hook('after_train_epoch')\r\n File \"/home/rajawat/anaconda3/envs/slowfast/lib/python3.7/site-packages/mmcv/runner/base_runner.py\", line 308, in call_hook\r\n getattr(hook, fn_name)(self)\r\n File \"/home/rajawat/Desktop/mmaction/core/evaluation/eval_hooks.py\", line 152, in after_train_epoch\r\n key_score = self.evaluate(runner, results)\r\n File \"/home/rajawat/Desktop/mmaction/core/evaluation/eval_hooks.py\", line 170, in evaluate\r\n results, logger=runner.logger, **self.eval_kwargs)\r\n File \"/home/rajawat/Desktop/mmaction/datasets/base.py\", line 215, in evaluate\r\n for label in gt_labels\r\n File \"/home/rajawat/Desktop/mmaction/datasets/base.py\", line 215, in <listcomp>\r\n for label in gt_labels\r\n File \"/home/rajawat/Desktop/mmaction/datasets/base.py\", line 121, in label2array\r\n arr[label] = 1.\r\nIndexError: only integers, slices (`:`), ellipsis (`...`), numpy.newaxis (`None`) and integer or boolean arrays are valid indices\r\n\r\n```\r\nAm I missing something in the config?\r\nThanks in advance.\n", "before_files": [{"content": "import os.path as osp\n\nimport torch\n\nfrom .base import BaseDataset\nfrom .registry import DATASETS\n\n\[email protected]_module()\nclass VideoDataset(BaseDataset):\n \"\"\"Video dataset for action recognition.\n\n The dataset loads raw videos and apply specified transforms to return a\n dict containing the frame tensors and other information.\n\n The ann_file is a text file with multiple lines, and each line indicates\n a sample video with the filepath and label, which are split with a\n whitespace. Example of a annotation file:\n\n .. code-block:: txt\n\n some/path/000.mp4 1\n some/path/001.mp4 1\n some/path/002.mp4 2\n some/path/003.mp4 2\n some/path/004.mp4 3\n some/path/005.mp4 3\n\n\n Args:\n ann_file (str): Path to the annotation file.\n pipeline (list[dict | callable]): A sequence of data transforms.\n start_index (int): Specify a start index for frames in consideration of\n different filename format. However, when taking videos as input,\n it should be set to 0, since frames loaded from videos count\n from 0. 
Default: 0.\n **kwargs: Keyword arguments for ``BaseDataset``.\n \"\"\"\n\n def __init__(self, ann_file, pipeline, start_index=0, **kwargs):\n super().__init__(ann_file, pipeline, start_index=start_index, **kwargs)\n\n def load_annotations(self):\n \"\"\"Load annotation file to get video information.\"\"\"\n if self.ann_file.endswith('.json'):\n return self.load_json_annotations()\n\n video_infos = []\n with open(self.ann_file, 'r') as fin:\n for line in fin:\n line_split = line.strip().split()\n if self.multi_class:\n assert self.num_classes is not None\n filename, label = line_split[0], line_split[1:]\n label = list(map(int, label))\n onehot = torch.zeros(self.num_classes)\n onehot[label] = 1.0\n else:\n filename, label = line_split\n label = int(label)\n if self.data_prefix is not None:\n filename = osp.join(self.data_prefix, filename)\n video_infos.append(\n dict(\n filename=filename,\n label=onehot if self.multi_class else label))\n return video_infos\n", "path": "mmaction/datasets/video_dataset.py"}], "after_files": [{"content": "import os.path as osp\n\nfrom .base import BaseDataset\nfrom .registry import DATASETS\n\n\[email protected]_module()\nclass VideoDataset(BaseDataset):\n \"\"\"Video dataset for action recognition.\n\n The dataset loads raw videos and apply specified transforms to return a\n dict containing the frame tensors and other information.\n\n The ann_file is a text file with multiple lines, and each line indicates\n a sample video with the filepath and label, which are split with a\n whitespace. Example of a annotation file:\n\n .. code-block:: txt\n\n some/path/000.mp4 1\n some/path/001.mp4 1\n some/path/002.mp4 2\n some/path/003.mp4 2\n some/path/004.mp4 3\n some/path/005.mp4 3\n\n\n Args:\n ann_file (str): Path to the annotation file.\n pipeline (list[dict | callable]): A sequence of data transforms.\n start_index (int): Specify a start index for frames in consideration of\n different filename format. However, when taking videos as input,\n it should be set to 0, since frames loaded from videos count\n from 0. Default: 0.\n **kwargs: Keyword arguments for ``BaseDataset``.\n \"\"\"\n\n def __init__(self, ann_file, pipeline, start_index=0, **kwargs):\n super().__init__(ann_file, pipeline, start_index=start_index, **kwargs)\n\n def load_annotations(self):\n \"\"\"Load annotation file to get video information.\"\"\"\n if self.ann_file.endswith('.json'):\n return self.load_json_annotations()\n\n video_infos = []\n with open(self.ann_file, 'r') as fin:\n for line in fin:\n line_split = line.strip().split()\n if self.multi_class:\n assert self.num_classes is not None\n filename, label = line_split[0], line_split[1:]\n label = list(map(int, label))\n else:\n filename, label = line_split\n label = int(label)\n if self.data_prefix is not None:\n filename = osp.join(self.data_prefix, filename)\n video_infos.append(dict(filename=filename, label=label))\n return video_infos\n", "path": "mmaction/datasets/video_dataset.py"}]}
| 2,752 | 229 |
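The traceback in this record fails inside `label2array()` during validation because the pre-patch `VideoDataset.load_annotations()` already stored a one-hot `torch` tensor as the label, and that tensor is later used as a NumPy index when ground-truth arrays are rebuilt. The sketch below reproduces the mismatch with a simplified stand-in for `label2array()` (the real helper lives in `mmaction/datasets/base.py` and may differ in detail); only NumPy and PyTorch are assumed.

```python
import numpy as np
import torch


def label2array(num_classes, label):
    """Simplified stand-in for the helper named in the traceback."""
    arr = np.zeros(num_classes, dtype=np.float32)
    arr[label] = 1.0  # expects integer class indices
    return arr


num_classes = 14

# After the patch, load_annotations() keeps multi-class labels as plain
# lists of ints, which index cleanly:
print(label2array(num_classes, [1, 3, 5]))

# Before the patch, the dataset stored a one-hot float tensor instead, and
# using it as an index reproduces the reported failure (IndexError in the
# issue's environment; the exact exception text depends on numpy/torch).
onehot = torch.zeros(num_classes)
onehot[[1, 3, 5]] = 1.0
try:
    label2array(num_classes, onehot)
except (IndexError, TypeError) as err:
    print(type(err).__name__, err)
```

Keeping the annotation labels as integer lists, as the golden diff does, leaves any one-hot conversion to downstream code (such as the evaluation helper above) that expects integer indices.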
gh_patches_debug_32057
|
rasdani/github-patches
|
git_diff
|
doccano__doccano-1985
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
[Feature Request] Allowed to add more metadata for a project
Feature description
---------
currently we have many annotation projects in doccano.
However, it is not easy to find the the right project. Because the information for a project is only its name.
- If the user could add more metadata for a project will be good. Such as the created data, created user, description. And all those metadata could be shown in project list page to help the user find the project.
- the metadata for a project could be modified. For example, we created the project in a bad name such as "DocumentationClassification-1". And we can't change the name.
- some way to search the project or sort the project or filter the project? For example, sort the project by creation date or only shown the project created by a user.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `backend/projects/serializers.py`
Content:
```
1 from rest_framework import serializers
2 from rest_polymorphic.serializers import PolymorphicSerializer
3
4 from .models import (
5 BoundingBoxProject,
6 ImageCaptioningProject,
7 ImageClassificationProject,
8 IntentDetectionAndSlotFillingProject,
9 Member,
10 Project,
11 SegmentationProject,
12 Seq2seqProject,
13 SequenceLabelingProject,
14 Speech2textProject,
15 Tag,
16 TextClassificationProject,
17 )
18
19
20 class MemberSerializer(serializers.ModelSerializer):
21 username = serializers.SerializerMethodField()
22 rolename = serializers.SerializerMethodField()
23
24 @classmethod
25 def get_username(cls, instance):
26 user = instance.user
27 return user.username if user else None
28
29 @classmethod
30 def get_rolename(cls, instance):
31 role = instance.role
32 return role.name if role else None
33
34 class Meta:
35 model = Member
36 fields = ("id", "user", "role", "username", "rolename")
37
38
39 class TagSerializer(serializers.ModelSerializer):
40 class Meta:
41 model = Tag
42 fields = (
43 "id",
44 "project",
45 "text",
46 )
47 read_only_fields = ("id", "project")
48
49
50 class ProjectSerializer(serializers.ModelSerializer):
51 tags = TagSerializer(many=True, required=False)
52
53 class Meta:
54 model = Project
55 fields = [
56 "id",
57 "name",
58 "description",
59 "guideline",
60 "project_type",
61 "updated_at",
62 "random_order",
63 "created_by",
64 "collaborative_annotation",
65 "single_class_classification",
66 "is_text_project",
67 "can_define_label",
68 "can_define_relation",
69 "can_define_category",
70 "can_define_span",
71 "tags",
72 ]
73 read_only_fields = (
74 "updated_at",
75 "is_text_project",
76 "can_define_label",
77 "can_define_relation",
78 "can_define_category",
79 "can_define_span",
80 )
81
82 def create(self, validated_data):
83 tags = TagSerializer(data=validated_data.pop("tags", []), many=True)
84 project = self.Meta.model.objects.create(**validated_data)
85 tags.is_valid()
86 tags.save(project=project)
87 return project
88
89 def update(self, instance, validated_data):
90 # Don't update tags. Please use TagAPI.
91 validated_data.pop("tags", None)
92 return super().update(instance, validated_data)
93
94
95 class TextClassificationProjectSerializer(ProjectSerializer):
96 class Meta(ProjectSerializer.Meta):
97 model = TextClassificationProject
98
99
100 class SequenceLabelingProjectSerializer(ProjectSerializer):
101 class Meta(ProjectSerializer.Meta):
102 model = SequenceLabelingProject
103 fields = ProjectSerializer.Meta.fields + ["allow_overlapping", "grapheme_mode", "use_relation"]
104
105
106 class Seq2seqProjectSerializer(ProjectSerializer):
107 class Meta(ProjectSerializer.Meta):
108 model = Seq2seqProject
109
110
111 class IntentDetectionAndSlotFillingProjectSerializer(ProjectSerializer):
112 class Meta(ProjectSerializer.Meta):
113 model = IntentDetectionAndSlotFillingProject
114
115
116 class Speech2textProjectSerializer(ProjectSerializer):
117 class Meta(ProjectSerializer.Meta):
118 model = Speech2textProject
119
120
121 class ImageClassificationProjectSerializer(ProjectSerializer):
122 class Meta(ProjectSerializer.Meta):
123 model = ImageClassificationProject
124
125
126 class BoundingBoxProjectSerializer(ProjectSerializer):
127 class Meta(ProjectSerializer.Meta):
128 model = BoundingBoxProject
129
130
131 class SegmentationProjectSerializer(ProjectSerializer):
132 class Meta(ProjectSerializer.Meta):
133 model = SegmentationProject
134
135
136 class ImageCaptioningProjectSerializer(ProjectSerializer):
137 class Meta(ProjectSerializer.Meta):
138 model = ImageCaptioningProject
139
140
141 class ProjectPolymorphicSerializer(PolymorphicSerializer):
142 model_serializer_mapping = {
143 Project: ProjectSerializer,
144 **{cls.Meta.model: cls for cls in ProjectSerializer.__subclasses__()},
145 }
146
```
Path: `backend/projects/views/project.py`
Content:
```
1 from django.conf import settings
2 from django_filters.rest_framework import DjangoFilterBackend
3 from rest_framework import filters, generics, status
4 from rest_framework.permissions import IsAdminUser, IsAuthenticated
5 from rest_framework.response import Response
6
7 from projects.models import Project
8 from projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly
9 from projects.serializers import ProjectPolymorphicSerializer
10
11
12 class ProjectList(generics.ListCreateAPIView):
13 serializer_class = ProjectPolymorphicSerializer
14 filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
15 search_fields = ("name", "description")
16
17 def get_permissions(self):
18 if self.request.method == "GET":
19 self.permission_classes = [
20 IsAuthenticated,
21 ]
22 else:
23 self.permission_classes = [IsAuthenticated & IsAdminUser]
24 return super().get_permissions()
25
26 def get_queryset(self):
27 return Project.objects.filter(role_mappings__user=self.request.user)
28
29 def perform_create(self, serializer):
30 project = serializer.save(created_by=self.request.user)
31 project.add_admin()
32
33 def delete(self, request, *args, **kwargs):
34 delete_ids = request.data["ids"]
35 projects = Project.objects.filter(
36 role_mappings__user=self.request.user,
37 role_mappings__role__name=settings.ROLE_PROJECT_ADMIN,
38 pk__in=delete_ids,
39 )
40 # Todo: I want to use bulk delete.
41 # But it causes the constraint error.
42 # See https://github.com/django-polymorphic/django-polymorphic/issues/229
43 for project in projects:
44 project.delete()
45 return Response(status=status.HTTP_204_NO_CONTENT)
46
47
48 class ProjectDetail(generics.RetrieveUpdateDestroyAPIView):
49 queryset = Project.objects.all()
50 serializer_class = ProjectPolymorphicSerializer
51 lookup_url_kwarg = "project_id"
52 permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]
53
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/backend/projects/serializers.py b/backend/projects/serializers.py
--- a/backend/projects/serializers.py
+++ b/backend/projects/serializers.py
@@ -49,6 +49,13 @@
class ProjectSerializer(serializers.ModelSerializer):
tags = TagSerializer(many=True, required=False)
+ author = serializers.SerializerMethodField()
+
+ @classmethod
+ def get_author(cls, instance):
+ if instance.created_by:
+ return instance.created_by.username
+ return ""
class Meta:
model = Project
@@ -58,9 +65,10 @@
"description",
"guideline",
"project_type",
+ "created_at",
"updated_at",
"random_order",
- "created_by",
+ "author",
"collaborative_annotation",
"single_class_classification",
"is_text_project",
@@ -71,7 +79,9 @@
"tags",
]
read_only_fields = (
+ "created_at",
"updated_at",
+ "author",
"is_text_project",
"can_define_label",
"can_define_relation",
diff --git a/backend/projects/views/project.py b/backend/projects/views/project.py
--- a/backend/projects/views/project.py
+++ b/backend/projects/views/project.py
@@ -13,6 +13,8 @@
serializer_class = ProjectPolymorphicSerializer
filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)
search_fields = ("name", "description")
+ ordering_fields = ["name", "created_at", "created_by", "project_type"]
+ ordering = ["-created_at"]
def get_permissions(self):
if self.request.method == "GET":
|
{"golden_diff": "diff --git a/backend/projects/serializers.py b/backend/projects/serializers.py\n--- a/backend/projects/serializers.py\n+++ b/backend/projects/serializers.py\n@@ -49,6 +49,13 @@\n \n class ProjectSerializer(serializers.ModelSerializer):\n tags = TagSerializer(many=True, required=False)\n+ author = serializers.SerializerMethodField()\n+\n+ @classmethod\n+ def get_author(cls, instance):\n+ if instance.created_by:\n+ return instance.created_by.username\n+ return \"\"\n \n class Meta:\n model = Project\n@@ -58,9 +65,10 @@\n \"description\",\n \"guideline\",\n \"project_type\",\n+ \"created_at\",\n \"updated_at\",\n \"random_order\",\n- \"created_by\",\n+ \"author\",\n \"collaborative_annotation\",\n \"single_class_classification\",\n \"is_text_project\",\n@@ -71,7 +79,9 @@\n \"tags\",\n ]\n read_only_fields = (\n+ \"created_at\",\n \"updated_at\",\n+ \"author\",\n \"is_text_project\",\n \"can_define_label\",\n \"can_define_relation\",\ndiff --git a/backend/projects/views/project.py b/backend/projects/views/project.py\n--- a/backend/projects/views/project.py\n+++ b/backend/projects/views/project.py\n@@ -13,6 +13,8 @@\n serializer_class = ProjectPolymorphicSerializer\n filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)\n search_fields = (\"name\", \"description\")\n+ ordering_fields = [\"name\", \"created_at\", \"created_by\", \"project_type\"]\n+ ordering = [\"-created_at\"]\n \n def get_permissions(self):\n if self.request.method == \"GET\":\n", "issue": "[Feature Request] Allowed to add more metadata for a project\nFeature description\r\n---------\r\ncurrently we have many annotation projects in doccano.\r\nHowever, it is not easy to find the the right project. Because the information for a project is only its name.\r\n- If the user could add more metadata for a project will be good. Such as the created data, created user, description. And all those metadata could be shown in project list page to help the user find the project.\r\n- the metadata for a project could be modified. For example, we created the project in a bad name such as \"DocumentationClassification-1\". And we can't change the name.\r\n- some way to search the project or sort the project or filter the project? 
For example, sort the project by creation date or only shown the project created by a user.\r\n\n", "before_files": [{"content": "from rest_framework import serializers\nfrom rest_polymorphic.serializers import PolymorphicSerializer\n\nfrom .models import (\n BoundingBoxProject,\n ImageCaptioningProject,\n ImageClassificationProject,\n IntentDetectionAndSlotFillingProject,\n Member,\n Project,\n SegmentationProject,\n Seq2seqProject,\n SequenceLabelingProject,\n Speech2textProject,\n Tag,\n TextClassificationProject,\n)\n\n\nclass MemberSerializer(serializers.ModelSerializer):\n username = serializers.SerializerMethodField()\n rolename = serializers.SerializerMethodField()\n\n @classmethod\n def get_username(cls, instance):\n user = instance.user\n return user.username if user else None\n\n @classmethod\n def get_rolename(cls, instance):\n role = instance.role\n return role.name if role else None\n\n class Meta:\n model = Member\n fields = (\"id\", \"user\", \"role\", \"username\", \"rolename\")\n\n\nclass TagSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = (\n \"id\",\n \"project\",\n \"text\",\n )\n read_only_fields = (\"id\", \"project\")\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n tags = TagSerializer(many=True, required=False)\n\n class Meta:\n model = Project\n fields = [\n \"id\",\n \"name\",\n \"description\",\n \"guideline\",\n \"project_type\",\n \"updated_at\",\n \"random_order\",\n \"created_by\",\n \"collaborative_annotation\",\n \"single_class_classification\",\n \"is_text_project\",\n \"can_define_label\",\n \"can_define_relation\",\n \"can_define_category\",\n \"can_define_span\",\n \"tags\",\n ]\n read_only_fields = (\n \"updated_at\",\n \"is_text_project\",\n \"can_define_label\",\n \"can_define_relation\",\n \"can_define_category\",\n \"can_define_span\",\n )\n\n def create(self, validated_data):\n tags = TagSerializer(data=validated_data.pop(\"tags\", []), many=True)\n project = self.Meta.model.objects.create(**validated_data)\n tags.is_valid()\n tags.save(project=project)\n return project\n\n def update(self, instance, validated_data):\n # Don't update tags. 
Please use TagAPI.\n validated_data.pop(\"tags\", None)\n return super().update(instance, validated_data)\n\n\nclass TextClassificationProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = TextClassificationProject\n\n\nclass SequenceLabelingProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = SequenceLabelingProject\n fields = ProjectSerializer.Meta.fields + [\"allow_overlapping\", \"grapheme_mode\", \"use_relation\"]\n\n\nclass Seq2seqProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = Seq2seqProject\n\n\nclass IntentDetectionAndSlotFillingProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = IntentDetectionAndSlotFillingProject\n\n\nclass Speech2textProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = Speech2textProject\n\n\nclass ImageClassificationProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = ImageClassificationProject\n\n\nclass BoundingBoxProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = BoundingBoxProject\n\n\nclass SegmentationProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = SegmentationProject\n\n\nclass ImageCaptioningProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = ImageCaptioningProject\n\n\nclass ProjectPolymorphicSerializer(PolymorphicSerializer):\n model_serializer_mapping = {\n Project: ProjectSerializer,\n **{cls.Meta.model: cls for cls in ProjectSerializer.__subclasses__()},\n }\n", "path": "backend/projects/serializers.py"}, {"content": "from django.conf import settings\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import filters, generics, status\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom projects.models import Project\nfrom projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly\nfrom projects.serializers import ProjectPolymorphicSerializer\n\n\nclass ProjectList(generics.ListCreateAPIView):\n serializer_class = ProjectPolymorphicSerializer\n filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)\n search_fields = (\"name\", \"description\")\n\n def get_permissions(self):\n if self.request.method == \"GET\":\n self.permission_classes = [\n IsAuthenticated,\n ]\n else:\n self.permission_classes = [IsAuthenticated & IsAdminUser]\n return super().get_permissions()\n\n def get_queryset(self):\n return Project.objects.filter(role_mappings__user=self.request.user)\n\n def perform_create(self, serializer):\n project = serializer.save(created_by=self.request.user)\n project.add_admin()\n\n def delete(self, request, *args, **kwargs):\n delete_ids = request.data[\"ids\"]\n projects = Project.objects.filter(\n role_mappings__user=self.request.user,\n role_mappings__role__name=settings.ROLE_PROJECT_ADMIN,\n pk__in=delete_ids,\n )\n # Todo: I want to use bulk delete.\n # But it causes the constraint error.\n # See https://github.com/django-polymorphic/django-polymorphic/issues/229\n for project in projects:\n project.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass ProjectDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Project.objects.all()\n serializer_class = ProjectPolymorphicSerializer\n lookup_url_kwarg = \"project_id\"\n permission_classes = [IsAuthenticated & 
(IsProjectAdmin | IsProjectStaffAndReadOnly)]\n", "path": "backend/projects/views/project.py"}], "after_files": [{"content": "from rest_framework import serializers\nfrom rest_polymorphic.serializers import PolymorphicSerializer\n\nfrom .models import (\n BoundingBoxProject,\n ImageCaptioningProject,\n ImageClassificationProject,\n IntentDetectionAndSlotFillingProject,\n Member,\n Project,\n SegmentationProject,\n Seq2seqProject,\n SequenceLabelingProject,\n Speech2textProject,\n Tag,\n TextClassificationProject,\n)\n\n\nclass MemberSerializer(serializers.ModelSerializer):\n username = serializers.SerializerMethodField()\n rolename = serializers.SerializerMethodField()\n\n @classmethod\n def get_username(cls, instance):\n user = instance.user\n return user.username if user else None\n\n @classmethod\n def get_rolename(cls, instance):\n role = instance.role\n return role.name if role else None\n\n class Meta:\n model = Member\n fields = (\"id\", \"user\", \"role\", \"username\", \"rolename\")\n\n\nclass TagSerializer(serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = (\n \"id\",\n \"project\",\n \"text\",\n )\n read_only_fields = (\"id\", \"project\")\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n tags = TagSerializer(many=True, required=False)\n author = serializers.SerializerMethodField()\n\n @classmethod\n def get_author(cls, instance):\n if instance.created_by:\n return instance.created_by.username\n return \"\"\n\n class Meta:\n model = Project\n fields = [\n \"id\",\n \"name\",\n \"description\",\n \"guideline\",\n \"project_type\",\n \"created_at\",\n \"updated_at\",\n \"random_order\",\n \"author\",\n \"collaborative_annotation\",\n \"single_class_classification\",\n \"is_text_project\",\n \"can_define_label\",\n \"can_define_relation\",\n \"can_define_category\",\n \"can_define_span\",\n \"tags\",\n ]\n read_only_fields = (\n \"created_at\",\n \"updated_at\",\n \"author\",\n \"is_text_project\",\n \"can_define_label\",\n \"can_define_relation\",\n \"can_define_category\",\n \"can_define_span\",\n )\n\n def create(self, validated_data):\n tags = TagSerializer(data=validated_data.pop(\"tags\", []), many=True)\n project = self.Meta.model.objects.create(**validated_data)\n tags.is_valid()\n tags.save(project=project)\n return project\n\n def update(self, instance, validated_data):\n # Don't update tags. 
Please use TagAPI.\n validated_data.pop(\"tags\", None)\n return super().update(instance, validated_data)\n\n\nclass TextClassificationProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = TextClassificationProject\n\n\nclass SequenceLabelingProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = SequenceLabelingProject\n fields = ProjectSerializer.Meta.fields + [\"allow_overlapping\", \"grapheme_mode\", \"use_relation\"]\n\n\nclass Seq2seqProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = Seq2seqProject\n\n\nclass IntentDetectionAndSlotFillingProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = IntentDetectionAndSlotFillingProject\n\n\nclass Speech2textProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = Speech2textProject\n\n\nclass ImageClassificationProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = ImageClassificationProject\n\n\nclass BoundingBoxProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = BoundingBoxProject\n\n\nclass SegmentationProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = SegmentationProject\n\n\nclass ImageCaptioningProjectSerializer(ProjectSerializer):\n class Meta(ProjectSerializer.Meta):\n model = ImageCaptioningProject\n\n\nclass ProjectPolymorphicSerializer(PolymorphicSerializer):\n model_serializer_mapping = {\n Project: ProjectSerializer,\n **{cls.Meta.model: cls for cls in ProjectSerializer.__subclasses__()},\n }\n", "path": "backend/projects/serializers.py"}, {"content": "from django.conf import settings\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework import filters, generics, status\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom projects.models import Project\nfrom projects.permissions import IsProjectAdmin, IsProjectStaffAndReadOnly\nfrom projects.serializers import ProjectPolymorphicSerializer\n\n\nclass ProjectList(generics.ListCreateAPIView):\n serializer_class = ProjectPolymorphicSerializer\n filter_backends = (DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter)\n search_fields = (\"name\", \"description\")\n ordering_fields = [\"name\", \"created_at\", \"created_by\", \"project_type\"]\n ordering = [\"-created_at\"]\n\n def get_permissions(self):\n if self.request.method == \"GET\":\n self.permission_classes = [\n IsAuthenticated,\n ]\n else:\n self.permission_classes = [IsAuthenticated & IsAdminUser]\n return super().get_permissions()\n\n def get_queryset(self):\n return Project.objects.filter(role_mappings__user=self.request.user)\n\n def perform_create(self, serializer):\n project = serializer.save(created_by=self.request.user)\n project.add_admin()\n\n def delete(self, request, *args, **kwargs):\n delete_ids = request.data[\"ids\"]\n projects = Project.objects.filter(\n role_mappings__user=self.request.user,\n role_mappings__role__name=settings.ROLE_PROJECT_ADMIN,\n pk__in=delete_ids,\n )\n # Todo: I want to use bulk delete.\n # But it causes the constraint error.\n # See https://github.com/django-polymorphic/django-polymorphic/issues/229\n for project in projects:\n project.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass ProjectDetail(generics.RetrieveUpdateDestroyAPIView):\n queryset = Project.objects.all()\n serializer_class = 
ProjectPolymorphicSerializer\n lookup_url_kwarg = \"project_id\"\n permission_classes = [IsAuthenticated & (IsProjectAdmin | IsProjectStaffAndReadOnly)]\n", "path": "backend/projects/views/project.py"}]}
| 2,096 | 381 |
gh_patches_debug_8883 | rasdani/github-patches | git_diff | python-pillow__Pillow-906 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Cannot from PIL import ImageGrab
Does Pillow2.5.3 ImageGrab still not support other OS except windows?
If not, why we cannot do that?
---
/Library/Python/2.7/site-packages/Pillow-2.5.3-py2.7-macosx-10.9-intel.egg/PIL/**init**.py
Python 2.7.5 (default, Mar 9 2014, 22:15:05)
[GCC 4.2.1 Compatible Apple LLVM 5.0 (clang-500.0.68)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
> > > from PIL import ImageGrab
> > > Traceback (most recent call last):
> > > File "<stdin>", line 1, in <module>
> > > File "build/bdist.macosx-10.9-intel/egg/PIL/ImageGrab.py", line 26, in <module>
> > > ImportError: No module named _grabscreen
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `PIL/ImageGrab.py`
Content:
```
1 #
2 # The Python Imaging Library
3 # $Id$
4 #
5 # screen grabber (windows only)
6 #
7 # History:
8 # 2001-04-26 fl created
9 # 2001-09-17 fl use builtin driver, if present
10 # 2002-11-19 fl added grabclipboard support
11 #
12 # Copyright (c) 2001-2002 by Secret Labs AB
13 # Copyright (c) 2001-2002 by Fredrik Lundh
14 #
15 # See the README file for information on usage and redistribution.
16 #
17
18 from PIL import Image
19
20
21 try:
22 # built-in driver (1.1.3 and later)
23 grabber = Image.core.grabscreen
24 except AttributeError:
25 # stand-alone driver (pil plus)
26 import _grabscreen
27 grabber = _grabscreen.grab
28
29
30 def grab(bbox=None):
31 size, data = grabber()
32 im = Image.frombytes(
33 "RGB", size, data,
34 # RGB, 32-bit line padding, origo in lower left corner
35 "raw", "BGR", (size[0]*3 + 3) & -4, -1
36 )
37 if bbox:
38 im = im.crop(bbox)
39 return im
40
41
42 def grabclipboard():
43 debug = 0 # temporary interface
44 data = Image.core.grabclipboard(debug)
45 if isinstance(data, bytes):
46 from PIL import BmpImagePlugin
47 import io
48 return BmpImagePlugin.DibImageFile(io.BytesIO(data))
49 return data
50
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/PIL/ImageGrab.py b/PIL/ImageGrab.py
--- a/PIL/ImageGrab.py
+++ b/PIL/ImageGrab.py
@@ -17,6 +17,9 @@
from PIL import Image
+import sys
+if sys.platform != "win32":
+ raise ImportError("ImageGrab is Windows only")
try:
# built-in driver (1.1.3 and later)
@@ -40,7 +43,7 @@
def grabclipboard():
- debug = 0 # temporary interface
+ debug = 0 # temporary interface
data = Image.core.grabclipboard(debug)
if isinstance(data, bytes):
from PIL import BmpImagePlugin
|
{"golden_diff": "diff --git a/PIL/ImageGrab.py b/PIL/ImageGrab.py\n--- a/PIL/ImageGrab.py\n+++ b/PIL/ImageGrab.py\n@@ -17,6 +17,9 @@\n \n from PIL import Image\n \n+import sys\n+if sys.platform != \"win32\":\n+ raise ImportError(\"ImageGrab is Windows only\")\n \n try:\n # built-in driver (1.1.3 and later)\n@@ -40,7 +43,7 @@\n \n \n def grabclipboard():\n- debug = 0 # temporary interface\n+ debug = 0 # temporary interface\n data = Image.core.grabclipboard(debug)\n if isinstance(data, bytes):\n from PIL import BmpImagePlugin\n", "issue": "Cannot from PIL import ImageGrab\nDoes Pillow2.5.3 ImageGrab still not support other OS except windows?\nIf not, why we cannot do that?\n\n---\n\n/Library/Python/2.7/site-packages/Pillow-2.5.3-py2.7-macosx-10.9-intel.egg/PIL/**init**.py\n\nPython 2.7.5 (default, Mar 9 2014, 22:15:05)\n[GCC 4.2.1 Compatible Apple LLVM 5.0 (clang-500.0.68)] on darwin\nType \"help\", \"copyright\", \"credits\" or \"license\" for more information.\n\n> > > from PIL import ImageGrab\n> > > Traceback (most recent call last):\n> > > File \"<stdin>\", line 1, in <module>\n> > > File \"build/bdist.macosx-10.9-intel/egg/PIL/ImageGrab.py\", line 26, in <module>\n> > > ImportError: No module named _grabscreen\n\n", "before_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# screen grabber (windows only)\n#\n# History:\n# 2001-04-26 fl created\n# 2001-09-17 fl use builtin driver, if present\n# 2002-11-19 fl added grabclipboard support\n#\n# Copyright (c) 2001-2002 by Secret Labs AB\n# Copyright (c) 2001-2002 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nfrom PIL import Image\n\n\ntry:\n # built-in driver (1.1.3 and later)\n grabber = Image.core.grabscreen\nexcept AttributeError:\n # stand-alone driver (pil plus)\n import _grabscreen\n grabber = _grabscreen.grab\n\n\ndef grab(bbox=None):\n size, data = grabber()\n im = Image.frombytes(\n \"RGB\", size, data,\n # RGB, 32-bit line padding, origo in lower left corner\n \"raw\", \"BGR\", (size[0]*3 + 3) & -4, -1\n )\n if bbox:\n im = im.crop(bbox)\n return im\n\n\ndef grabclipboard():\n debug = 0 # temporary interface\n data = Image.core.grabclipboard(debug)\n if isinstance(data, bytes):\n from PIL import BmpImagePlugin\n import io\n return BmpImagePlugin.DibImageFile(io.BytesIO(data))\n return data\n", "path": "PIL/ImageGrab.py"}], "after_files": [{"content": "#\n# The Python Imaging Library\n# $Id$\n#\n# screen grabber (windows only)\n#\n# History:\n# 2001-04-26 fl created\n# 2001-09-17 fl use builtin driver, if present\n# 2002-11-19 fl added grabclipboard support\n#\n# Copyright (c) 2001-2002 by Secret Labs AB\n# Copyright (c) 2001-2002 by Fredrik Lundh\n#\n# See the README file for information on usage and redistribution.\n#\n\nfrom PIL import Image\n\nimport sys\nif sys.platform != \"win32\":\n raise ImportError(\"ImageGrab is Windows only\")\n\ntry:\n # built-in driver (1.1.3 and later)\n grabber = Image.core.grabscreen\nexcept AttributeError:\n # stand-alone driver (pil plus)\n import _grabscreen\n grabber = _grabscreen.grab\n\n\ndef grab(bbox=None):\n size, data = grabber()\n im = Image.frombytes(\n \"RGB\", size, data,\n # RGB, 32-bit line padding, origo in lower left corner\n \"raw\", \"BGR\", (size[0]*3 + 3) & -4, -1\n )\n if bbox:\n im = im.crop(bbox)\n return im\n\n\ndef grabclipboard():\n debug = 0 # temporary interface\n data = Image.core.grabclipboard(debug)\n if isinstance(data, bytes):\n from PIL import BmpImagePlugin\n import io\n return 
BmpImagePlugin.DibImageFile(io.BytesIO(data))\n return data\n", "path": "PIL/ImageGrab.py"}]}
| 945 | 156 |
gh_patches_debug_23519 | rasdani/github-patches | git_diff | jazzband__pip-tools-927 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Sync raises IncompatibleRequirements even when environment markers indicate the incompatible requirements to be irrelevant for the current platform
IMO this is simply more of #206, and I've already demonstrated the problem there. But it's been a month and no one has re-opened, so I'm opening this more specific issue to address the problem.
Here's an example `dev-requirements.txt` from the `plumbum` project:
```python
pytest
pytest-cov
pytest-mock
idna<2.8 ; python_version < '2.7'
pycparser<2.18 ; python_version < '2.7'
paramiko<2.4 ; python_version < '2.7'
paramiko ; python_version >= '2.7'
setuptools
wheel ; python_version >= '2.7'
psutil
```
```bash
pip-sync dev-requirements.txt
```
Identical output whether in a Python `3.7.4` or `2.7.16` env:
```
Incompatible requirements found: paramiko (from -r dev-requirements.txt (line 7)) and paramiko<2.4 (from -r dev-requirements.txt (line 6))
```
No packages end up installed aside from `pip-tools` and its deps.
#### Environment Versions
1. Arch Linux
1. Python version: `3.7.4`
1. pip version: `19.2.3`
1. pip-tools version: `4.1.0`
#### Steps to replicate
```bash
echo "paramiko==2.4.0 ; python_version < '2.7'" > mark.txt
echo "paramiko==2.6.0 ; python_version >= '2.7'" >> mark.txt
pip-sync mark.txt
```
Note that this works:
```bash
pip install --no-deps -r mark.txt
```
#### Expected result
`pip-sync` should ignore non-matching requirements when environment markers are present.
#### Actual result
`pip-sync` checks for conflicts as if it wants to install requirements for all platforms.
#### Further notes
```bash
mv mark.txt mark.in
pip-compile --no-header mark.in
```
```python
asn1crypto==1.0.1 # via cryptography
bcrypt==3.1.7 # via paramiko
cffi==1.12.3 # via bcrypt, cryptography, pynacl
cryptography==2.7 # via paramiko
paramiko==2.6.0 ; python_version >= "2.7"
pycparser==2.19 # via cffi
pynacl==1.3.0 # via paramiko
six==1.12.0 # via bcrypt, cryptography, pynacl
```
Currently, compiling such an in-file will only include the compile-time platform's matching reqs. This hides the issue under discussion, and arguably means it's not a bug. But I believe it is generally desired for pip-sync to honor environment markers, as evidenced by the contents of #206 (closed, but not solved), #600 (merged), #459 (replaced), #460 (merged), #518 (open 2yrs), #563 (open 2yrs), #585 (open 2yrs), #896 (open), etc.
This is probably even more relevant for working with a single python version across different platforms.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `piptools/sync.py`
Content:
```
1 import collections
2 import os
3 import sys
4 import tempfile
5 from subprocess import check_call # nosec
6
7 from pip._internal.commands.freeze import DEV_PKGS
8 from pip._internal.utils.compat import stdlib_pkgs
9
10 from . import click
11 from .exceptions import IncompatibleRequirements
12 from .utils import (
13 flat_map,
14 format_requirement,
15 get_hashes_from_ireq,
16 is_url_requirement,
17 key_from_ireq,
18 key_from_req,
19 )
20
21 PACKAGES_TO_IGNORE = (
22 ["-markerlib", "pip", "pip-tools", "pip-review", "pkg-resources"]
23 + list(stdlib_pkgs)
24 + list(DEV_PKGS)
25 )
26
27
28 def dependency_tree(installed_keys, root_key):
29 """
30 Calculate the dependency tree for the package `root_key` and return
31 a collection of all its dependencies. Uses a DFS traversal algorithm.
32
33 `installed_keys` should be a {key: requirement} mapping, e.g.
34 {'django': from_line('django==1.8')}
35 `root_key` should be the key to return the dependency tree for.
36 """
37 dependencies = set()
38 queue = collections.deque()
39
40 if root_key in installed_keys:
41 dep = installed_keys[root_key]
42 queue.append(dep)
43
44 while queue:
45 v = queue.popleft()
46 key = key_from_req(v)
47 if key in dependencies:
48 continue
49
50 dependencies.add(key)
51
52 for dep_specifier in v.requires():
53 dep_name = key_from_req(dep_specifier)
54 if dep_name in installed_keys:
55 dep = installed_keys[dep_name]
56
57 if dep_specifier.specifier.contains(dep.version):
58 queue.append(dep)
59
60 return dependencies
61
62
63 def get_dists_to_ignore(installed):
64 """
65 Returns a collection of package names to ignore when performing pip-sync,
66 based on the currently installed environment. For example, when pip-tools
67 is installed in the local environment, it should be ignored, including all
68 of its dependencies (e.g. click). When pip-tools is not installed
69 locally, click should also be installed/uninstalled depending on the given
70 requirements.
71 """
72 installed_keys = {key_from_req(r): r for r in installed}
73 return list(
74 flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE)
75 )
76
77
78 def merge(requirements, ignore_conflicts):
79 by_key = {}
80
81 for ireq in requirements:
82 # Limitation: URL requirements are merged by precise string match, so
83 # "file:///example.zip#egg=example", "file:///example.zip", and
84 # "example==1.0" will not merge with each other
85 key = key_from_ireq(ireq)
86
87 if not ignore_conflicts:
88 existing_ireq = by_key.get(key)
89 if existing_ireq:
90 # NOTE: We check equality here since we can assume that the
91 # requirements are all pinned
92 if ireq.specifier != existing_ireq.specifier:
93 raise IncompatibleRequirements(ireq, existing_ireq)
94
95 # TODO: Always pick the largest specifier in case of a conflict
96 by_key[key] = ireq
97 return by_key.values()
98
99
100 def diff_key_from_ireq(ireq):
101 """
102 Calculate a key for comparing a compiled requirement with installed modules.
103 For URL requirements, only provide a useful key if the url includes
104 #egg=name==version, which will set ireq.req.name and ireq.specifier.
105 Otherwise return ireq.link so the key will not match and the package will
106 reinstall. Reinstall is necessary to ensure that packages will reinstall
107 if the URL is changed but the version is not.
108 """
109 if is_url_requirement(ireq):
110 if (
111 ireq.req
112 and (getattr(ireq.req, "key", None) or getattr(ireq.req, "name", None))
113 and ireq.specifier
114 ):
115 return key_from_ireq(ireq)
116 return str(ireq.link)
117 return key_from_ireq(ireq)
118
119
120 def diff(compiled_requirements, installed_dists):
121 """
122 Calculate which packages should be installed or uninstalled, given a set
123 of compiled requirements and a list of currently installed modules.
124 """
125 requirements_lut = {diff_key_from_ireq(r): r for r in compiled_requirements}
126
127 satisfied = set() # holds keys
128 to_install = set() # holds InstallRequirement objects
129 to_uninstall = set() # holds keys
130
131 pkgs_to_ignore = get_dists_to_ignore(installed_dists)
132 for dist in installed_dists:
133 key = key_from_req(dist)
134 if key not in requirements_lut or not requirements_lut[key].match_markers():
135 to_uninstall.add(key)
136 elif requirements_lut[key].specifier.contains(dist.version):
137 satisfied.add(key)
138
139 for key, requirement in requirements_lut.items():
140 if key not in satisfied and requirement.match_markers():
141 to_install.add(requirement)
142
143 # Make sure to not uninstall any packages that should be ignored
144 to_uninstall -= set(pkgs_to_ignore)
145
146 return (to_install, to_uninstall)
147
148
149 def sync(
150 to_install,
151 to_uninstall,
152 verbose=False,
153 dry_run=False,
154 install_flags=None,
155 ask=False,
156 ):
157 """
158 Install and uninstalls the given sets of modules.
159 """
160 if not to_uninstall and not to_install:
161 if verbose:
162 click.echo("Everything up-to-date")
163 return 0
164
165 pip_flags = []
166 if not verbose:
167 pip_flags += ["-q"]
168
169 if ask:
170 dry_run = True
171
172 if dry_run:
173 if to_uninstall:
174 click.echo("Would uninstall:")
175 for pkg in to_uninstall:
176 click.echo(" {}".format(pkg))
177
178 if to_install:
179 click.echo("Would install:")
180 for ireq in to_install:
181 click.echo(" {}".format(format_requirement(ireq)))
182
183 if ask and click.confirm("Would you like to proceed with these changes?"):
184 dry_run = False
185
186 if not dry_run:
187 if to_uninstall:
188 check_call( # nosec
189 [sys.executable, "-m", "pip", "uninstall", "-y"]
190 + pip_flags
191 + sorted(to_uninstall)
192 )
193
194 if to_install:
195 if install_flags is None:
196 install_flags = []
197 # prepare requirement lines
198 req_lines = []
199 for ireq in sorted(to_install, key=key_from_ireq):
200 ireq_hashes = get_hashes_from_ireq(ireq)
201 req_lines.append(format_requirement(ireq, hashes=ireq_hashes))
202
203 # save requirement lines to a temporary file
204 tmp_req_file = tempfile.NamedTemporaryFile(mode="wt", delete=False)
205 tmp_req_file.write("\n".join(req_lines))
206 tmp_req_file.close()
207
208 try:
209 check_call( # nosec
210 [sys.executable, "-m", "pip", "install", "-r", tmp_req_file.name]
211 + pip_flags
212 + install_flags
213 )
214 finally:
215 os.unlink(tmp_req_file.name)
216
217 return 0
218
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/piptools/sync.py b/piptools/sync.py
--- a/piptools/sync.py
+++ b/piptools/sync.py
@@ -82,18 +82,19 @@
# Limitation: URL requirements are merged by precise string match, so
# "file:///example.zip#egg=example", "file:///example.zip", and
# "example==1.0" will not merge with each other
- key = key_from_ireq(ireq)
-
- if not ignore_conflicts:
- existing_ireq = by_key.get(key)
- if existing_ireq:
- # NOTE: We check equality here since we can assume that the
- # requirements are all pinned
- if ireq.specifier != existing_ireq.specifier:
- raise IncompatibleRequirements(ireq, existing_ireq)
-
- # TODO: Always pick the largest specifier in case of a conflict
- by_key[key] = ireq
+ if ireq.match_markers():
+ key = key_from_ireq(ireq)
+
+ if not ignore_conflicts:
+ existing_ireq = by_key.get(key)
+ if existing_ireq:
+ # NOTE: We check equality here since we can assume that the
+ # requirements are all pinned
+ if ireq.specifier != existing_ireq.specifier:
+ raise IncompatibleRequirements(ireq, existing_ireq)
+
+ # TODO: Always pick the largest specifier in case of a conflict
+ by_key[key] = ireq
return by_key.values()
|
{"golden_diff": "diff --git a/piptools/sync.py b/piptools/sync.py\n--- a/piptools/sync.py\n+++ b/piptools/sync.py\n@@ -82,18 +82,19 @@\n # Limitation: URL requirements are merged by precise string match, so\n # \"file:///example.zip#egg=example\", \"file:///example.zip\", and\n # \"example==1.0\" will not merge with each other\n- key = key_from_ireq(ireq)\n-\n- if not ignore_conflicts:\n- existing_ireq = by_key.get(key)\n- if existing_ireq:\n- # NOTE: We check equality here since we can assume that the\n- # requirements are all pinned\n- if ireq.specifier != existing_ireq.specifier:\n- raise IncompatibleRequirements(ireq, existing_ireq)\n-\n- # TODO: Always pick the largest specifier in case of a conflict\n- by_key[key] = ireq\n+ if ireq.match_markers():\n+ key = key_from_ireq(ireq)\n+\n+ if not ignore_conflicts:\n+ existing_ireq = by_key.get(key)\n+ if existing_ireq:\n+ # NOTE: We check equality here since we can assume that the\n+ # requirements are all pinned\n+ if ireq.specifier != existing_ireq.specifier:\n+ raise IncompatibleRequirements(ireq, existing_ireq)\n+\n+ # TODO: Always pick the largest specifier in case of a conflict\n+ by_key[key] = ireq\n return by_key.values()\n", "issue": "Sync raises IncompatibleRequirements even when environment markers indicate the incompatible requirements to be irrelevant for the current platform\nIMO this is simply more of #206, and I've already demonstrated the problem there. But it's been a month and no one has re-opened, so I'm opening this more specific issue to address the problem.\r\n\r\nHere's an example `dev-requirements.txt` from the `plumbum` project:\r\n\r\n```python\r\npytest\r\npytest-cov\r\npytest-mock\r\nidna<2.8 ; python_version < '2.7'\r\npycparser<2.18 ; python_version < '2.7'\r\nparamiko<2.4 ; python_version < '2.7'\r\nparamiko ; python_version >= '2.7'\r\nsetuptools\r\nwheel ; python_version >= '2.7'\r\npsutil\r\n```\r\n\r\n```bash\r\npip-sync dev-requirements.txt\r\n```\r\n\r\nIdentical output whether in a Python `3.7.4` or `2.7.16` env:\r\n\r\n```\r\nIncompatible requirements found: paramiko (from -r dev-requirements.txt (line 7)) and paramiko<2.4 (from -r dev-requirements.txt (line 6))\r\n```\r\n\r\nNo packages end up installed aside from `pip-tools` and its deps.\r\n\r\n#### Environment Versions\r\n\r\n1. Arch Linux\r\n1. Python version: `3.7.4`\r\n1. pip version: `19.2.3`\r\n1. pip-tools version: `4.1.0`\r\n\r\n#### Steps to replicate\r\n\r\n```bash\r\necho \"paramiko==2.4.0 ; python_version < '2.7'\" > mark.txt\r\necho \"paramiko==2.6.0 ; python_version >= '2.7'\" >> mark.txt\r\npip-sync mark.txt\r\n```\r\n\r\nNote that this works:\r\n\r\n```bash\r\npip install --no-deps -r mark.txt\r\n```\r\n\r\n#### Expected result\r\n\r\n`pip-sync` should ignore non-matching requirements when environment markers are present.\r\n\r\n#### Actual result\r\n\r\n`pip-sync` checks for conflicts as if it wants to install requirements for all platforms.\r\n\r\n#### Further notes\r\n\r\n```bash\r\nmv mark.txt mark.in\r\npip-compile --no-header mark.in\r\n```\r\n\r\n```python\r\nasn1crypto==1.0.1 # via cryptography\r\nbcrypt==3.1.7 # via paramiko\r\ncffi==1.12.3 # via bcrypt, cryptography, pynacl\r\ncryptography==2.7 # via paramiko\r\nparamiko==2.6.0 ; python_version >= \"2.7\"\r\npycparser==2.19 # via cffi\r\npynacl==1.3.0 # via paramiko\r\nsix==1.12.0 # via bcrypt, cryptography, pynacl\r\n```\r\n\r\nCurrently, compiling such an in-file will only include the compile-time platform's matching reqs. 
This hides the issue under discussion, and arguably means it's not a bug. But I believe it is generally desired for pip-sync to honor environment markers, as evidenced by the contents of #206 (closed, but not solved), #600 (merged), #459 (replaced), #460 (merged), #518 (open 2yrs), #563 (open 2yrs), #585 (open 2yrs), #896 (open), etc.\r\n\r\nThis is probably even more relevant for working with a single python version across different platforms.\n", "before_files": [{"content": "import collections\nimport os\nimport sys\nimport tempfile\nfrom subprocess import check_call # nosec\n\nfrom pip._internal.commands.freeze import DEV_PKGS\nfrom pip._internal.utils.compat import stdlib_pkgs\n\nfrom . import click\nfrom .exceptions import IncompatibleRequirements\nfrom .utils import (\n flat_map,\n format_requirement,\n get_hashes_from_ireq,\n is_url_requirement,\n key_from_ireq,\n key_from_req,\n)\n\nPACKAGES_TO_IGNORE = (\n [\"-markerlib\", \"pip\", \"pip-tools\", \"pip-review\", \"pkg-resources\"]\n + list(stdlib_pkgs)\n + list(DEV_PKGS)\n)\n\n\ndef dependency_tree(installed_keys, root_key):\n \"\"\"\n Calculate the dependency tree for the package `root_key` and return\n a collection of all its dependencies. Uses a DFS traversal algorithm.\n\n `installed_keys` should be a {key: requirement} mapping, e.g.\n {'django': from_line('django==1.8')}\n `root_key` should be the key to return the dependency tree for.\n \"\"\"\n dependencies = set()\n queue = collections.deque()\n\n if root_key in installed_keys:\n dep = installed_keys[root_key]\n queue.append(dep)\n\n while queue:\n v = queue.popleft()\n key = key_from_req(v)\n if key in dependencies:\n continue\n\n dependencies.add(key)\n\n for dep_specifier in v.requires():\n dep_name = key_from_req(dep_specifier)\n if dep_name in installed_keys:\n dep = installed_keys[dep_name]\n\n if dep_specifier.specifier.contains(dep.version):\n queue.append(dep)\n\n return dependencies\n\n\ndef get_dists_to_ignore(installed):\n \"\"\"\n Returns a collection of package names to ignore when performing pip-sync,\n based on the currently installed environment. For example, when pip-tools\n is installed in the local environment, it should be ignored, including all\n of its dependencies (e.g. click). 
When pip-tools is not installed\n locally, click should also be installed/uninstalled depending on the given\n requirements.\n \"\"\"\n installed_keys = {key_from_req(r): r for r in installed}\n return list(\n flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE)\n )\n\n\ndef merge(requirements, ignore_conflicts):\n by_key = {}\n\n for ireq in requirements:\n # Limitation: URL requirements are merged by precise string match, so\n # \"file:///example.zip#egg=example\", \"file:///example.zip\", and\n # \"example==1.0\" will not merge with each other\n key = key_from_ireq(ireq)\n\n if not ignore_conflicts:\n existing_ireq = by_key.get(key)\n if existing_ireq:\n # NOTE: We check equality here since we can assume that the\n # requirements are all pinned\n if ireq.specifier != existing_ireq.specifier:\n raise IncompatibleRequirements(ireq, existing_ireq)\n\n # TODO: Always pick the largest specifier in case of a conflict\n by_key[key] = ireq\n return by_key.values()\n\n\ndef diff_key_from_ireq(ireq):\n \"\"\"\n Calculate a key for comparing a compiled requirement with installed modules.\n For URL requirements, only provide a useful key if the url includes\n #egg=name==version, which will set ireq.req.name and ireq.specifier.\n Otherwise return ireq.link so the key will not match and the package will\n reinstall. Reinstall is necessary to ensure that packages will reinstall\n if the URL is changed but the version is not.\n \"\"\"\n if is_url_requirement(ireq):\n if (\n ireq.req\n and (getattr(ireq.req, \"key\", None) or getattr(ireq.req, \"name\", None))\n and ireq.specifier\n ):\n return key_from_ireq(ireq)\n return str(ireq.link)\n return key_from_ireq(ireq)\n\n\ndef diff(compiled_requirements, installed_dists):\n \"\"\"\n Calculate which packages should be installed or uninstalled, given a set\n of compiled requirements and a list of currently installed modules.\n \"\"\"\n requirements_lut = {diff_key_from_ireq(r): r for r in compiled_requirements}\n\n satisfied = set() # holds keys\n to_install = set() # holds InstallRequirement objects\n to_uninstall = set() # holds keys\n\n pkgs_to_ignore = get_dists_to_ignore(installed_dists)\n for dist in installed_dists:\n key = key_from_req(dist)\n if key not in requirements_lut or not requirements_lut[key].match_markers():\n to_uninstall.add(key)\n elif requirements_lut[key].specifier.contains(dist.version):\n satisfied.add(key)\n\n for key, requirement in requirements_lut.items():\n if key not in satisfied and requirement.match_markers():\n to_install.add(requirement)\n\n # Make sure to not uninstall any packages that should be ignored\n to_uninstall -= set(pkgs_to_ignore)\n\n return (to_install, to_uninstall)\n\n\ndef sync(\n to_install,\n to_uninstall,\n verbose=False,\n dry_run=False,\n install_flags=None,\n ask=False,\n):\n \"\"\"\n Install and uninstalls the given sets of modules.\n \"\"\"\n if not to_uninstall and not to_install:\n if verbose:\n click.echo(\"Everything up-to-date\")\n return 0\n\n pip_flags = []\n if not verbose:\n pip_flags += [\"-q\"]\n\n if ask:\n dry_run = True\n\n if dry_run:\n if to_uninstall:\n click.echo(\"Would uninstall:\")\n for pkg in to_uninstall:\n click.echo(\" {}\".format(pkg))\n\n if to_install:\n click.echo(\"Would install:\")\n for ireq in to_install:\n click.echo(\" {}\".format(format_requirement(ireq)))\n\n if ask and click.confirm(\"Would you like to proceed with these changes?\"):\n dry_run = False\n\n if not dry_run:\n if to_uninstall:\n check_call( # nosec\n [sys.executable, 
\"-m\", \"pip\", \"uninstall\", \"-y\"]\n + pip_flags\n + sorted(to_uninstall)\n )\n\n if to_install:\n if install_flags is None:\n install_flags = []\n # prepare requirement lines\n req_lines = []\n for ireq in sorted(to_install, key=key_from_ireq):\n ireq_hashes = get_hashes_from_ireq(ireq)\n req_lines.append(format_requirement(ireq, hashes=ireq_hashes))\n\n # save requirement lines to a temporary file\n tmp_req_file = tempfile.NamedTemporaryFile(mode=\"wt\", delete=False)\n tmp_req_file.write(\"\\n\".join(req_lines))\n tmp_req_file.close()\n\n try:\n check_call( # nosec\n [sys.executable, \"-m\", \"pip\", \"install\", \"-r\", tmp_req_file.name]\n + pip_flags\n + install_flags\n )\n finally:\n os.unlink(tmp_req_file.name)\n\n return 0\n", "path": "piptools/sync.py"}], "after_files": [{"content": "import collections\nimport os\nimport sys\nimport tempfile\nfrom subprocess import check_call # nosec\n\nfrom pip._internal.commands.freeze import DEV_PKGS\nfrom pip._internal.utils.compat import stdlib_pkgs\n\nfrom . import click\nfrom .exceptions import IncompatibleRequirements\nfrom .utils import (\n flat_map,\n format_requirement,\n get_hashes_from_ireq,\n is_url_requirement,\n key_from_ireq,\n key_from_req,\n)\n\nPACKAGES_TO_IGNORE = (\n [\"-markerlib\", \"pip\", \"pip-tools\", \"pip-review\", \"pkg-resources\"]\n + list(stdlib_pkgs)\n + list(DEV_PKGS)\n)\n\n\ndef dependency_tree(installed_keys, root_key):\n \"\"\"\n Calculate the dependency tree for the package `root_key` and return\n a collection of all its dependencies. Uses a DFS traversal algorithm.\n\n `installed_keys` should be a {key: requirement} mapping, e.g.\n {'django': from_line('django==1.8')}\n `root_key` should be the key to return the dependency tree for.\n \"\"\"\n dependencies = set()\n queue = collections.deque()\n\n if root_key in installed_keys:\n dep = installed_keys[root_key]\n queue.append(dep)\n\n while queue:\n v = queue.popleft()\n key = key_from_req(v)\n if key in dependencies:\n continue\n\n dependencies.add(key)\n\n for dep_specifier in v.requires():\n dep_name = key_from_req(dep_specifier)\n if dep_name in installed_keys:\n dep = installed_keys[dep_name]\n\n if dep_specifier.specifier.contains(dep.version):\n queue.append(dep)\n\n return dependencies\n\n\ndef get_dists_to_ignore(installed):\n \"\"\"\n Returns a collection of package names to ignore when performing pip-sync,\n based on the currently installed environment. For example, when pip-tools\n is installed in the local environment, it should be ignored, including all\n of its dependencies (e.g. click). 
When pip-tools is not installed\n locally, click should also be installed/uninstalled depending on the given\n requirements.\n \"\"\"\n installed_keys = {key_from_req(r): r for r in installed}\n return list(\n flat_map(lambda req: dependency_tree(installed_keys, req), PACKAGES_TO_IGNORE)\n )\n\n\ndef merge(requirements, ignore_conflicts):\n by_key = {}\n\n for ireq in requirements:\n # Limitation: URL requirements are merged by precise string match, so\n # \"file:///example.zip#egg=example\", \"file:///example.zip\", and\n # \"example==1.0\" will not merge with each other\n if ireq.match_markers():\n key = key_from_ireq(ireq)\n\n if not ignore_conflicts:\n existing_ireq = by_key.get(key)\n if existing_ireq:\n # NOTE: We check equality here since we can assume that the\n # requirements are all pinned\n if ireq.specifier != existing_ireq.specifier:\n raise IncompatibleRequirements(ireq, existing_ireq)\n\n # TODO: Always pick the largest specifier in case of a conflict\n by_key[key] = ireq\n return by_key.values()\n\n\ndef diff_key_from_ireq(ireq):\n \"\"\"\n Calculate a key for comparing a compiled requirement with installed modules.\n For URL requirements, only provide a useful key if the url includes\n #egg=name==version, which will set ireq.req.name and ireq.specifier.\n Otherwise return ireq.link so the key will not match and the package will\n reinstall. Reinstall is necessary to ensure that packages will reinstall\n if the URL is changed but the version is not.\n \"\"\"\n if is_url_requirement(ireq):\n if (\n ireq.req\n and (getattr(ireq.req, \"key\", None) or getattr(ireq.req, \"name\", None))\n and ireq.specifier\n ):\n return key_from_ireq(ireq)\n return str(ireq.link)\n return key_from_ireq(ireq)\n\n\ndef diff(compiled_requirements, installed_dists):\n \"\"\"\n Calculate which packages should be installed or uninstalled, given a set\n of compiled requirements and a list of currently installed modules.\n \"\"\"\n requirements_lut = {diff_key_from_ireq(r): r for r in compiled_requirements}\n\n satisfied = set() # holds keys\n to_install = set() # holds InstallRequirement objects\n to_uninstall = set() # holds keys\n\n pkgs_to_ignore = get_dists_to_ignore(installed_dists)\n for dist in installed_dists:\n key = key_from_req(dist)\n if key not in requirements_lut or not requirements_lut[key].match_markers():\n to_uninstall.add(key)\n elif requirements_lut[key].specifier.contains(dist.version):\n satisfied.add(key)\n\n for key, requirement in requirements_lut.items():\n if key not in satisfied and requirement.match_markers():\n to_install.add(requirement)\n\n # Make sure to not uninstall any packages that should be ignored\n to_uninstall -= set(pkgs_to_ignore)\n\n return (to_install, to_uninstall)\n\n\ndef sync(\n to_install,\n to_uninstall,\n verbose=False,\n dry_run=False,\n install_flags=None,\n ask=False,\n):\n \"\"\"\n Install and uninstalls the given sets of modules.\n \"\"\"\n if not to_uninstall and not to_install:\n if verbose:\n click.echo(\"Everything up-to-date\")\n return 0\n\n pip_flags = []\n if not verbose:\n pip_flags += [\"-q\"]\n\n if ask:\n dry_run = True\n\n if dry_run:\n if to_uninstall:\n click.echo(\"Would uninstall:\")\n for pkg in to_uninstall:\n click.echo(\" {}\".format(pkg))\n\n if to_install:\n click.echo(\"Would install:\")\n for ireq in to_install:\n click.echo(\" {}\".format(format_requirement(ireq)))\n\n if ask and click.confirm(\"Would you like to proceed with these changes?\"):\n dry_run = False\n\n if not dry_run:\n if to_uninstall:\n check_call( # 
nosec\n [sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\"]\n + pip_flags\n + sorted(to_uninstall)\n )\n\n if to_install:\n if install_flags is None:\n install_flags = []\n # prepare requirement lines\n req_lines = []\n for ireq in sorted(to_install, key=key_from_ireq):\n ireq_hashes = get_hashes_from_ireq(ireq)\n req_lines.append(format_requirement(ireq, hashes=ireq_hashes))\n\n # save requirement lines to a temporary file\n tmp_req_file = tempfile.NamedTemporaryFile(mode=\"wt\", delete=False)\n tmp_req_file.write(\"\\n\".join(req_lines))\n tmp_req_file.close()\n\n try:\n check_call( # nosec\n [sys.executable, \"-m\", \"pip\", \"install\", \"-r\", tmp_req_file.name]\n + pip_flags\n + install_flags\n )\n finally:\n os.unlink(tmp_req_file.name)\n\n return 0\n", "path": "piptools/sync.py"}]}
| 3,132 | 362 |
gh_patches_debug_1942 | rasdani/github-patches | git_diff | ocf__ocfweb-72 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Add "edit this page" link on docs?
It would link to the GitHub editor page.
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `ocfweb/docs/doc.py`
Content:
```
1 from collections import namedtuple
2
3
4 class Document(namedtuple('Document', ['name', 'title', 'render'])):
5
6 @property
7 def category(self):
8 """Return full category path of the document.
9
10 For example, "/" or "/staff/backend/".
11 """
12 return self.name.rsplit('/', 1)[0] + '/'
13
14 @property
15 def category_for_sidebar(self):
16 """Return the category to show similar pages for in the sidebar.
17
18 If this page isn't at the root category, we just return this page's
19 category.
20
21 If this page is at the root category, we return the category rooted at
22 this page (which may or may not have any pages in it).
23 """
24 if self.category == '/':
25 return self.name + '/'
26 else:
27 return self.category
28
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/ocfweb/docs/doc.py b/ocfweb/docs/doc.py
--- a/ocfweb/docs/doc.py
+++ b/ocfweb/docs/doc.py
@@ -25,3 +25,12 @@
return self.name + '/'
else:
return self.category
+
+ @property
+ def edit_url(self):
+ """Return a GitHub edit URL for this page."""
+ return (
+ 'https://github.com/ocf/ocfweb/edit/master/ocfweb/docs/docs' +
+ self.name +
+ '.md'
+ )
|
{"golden_diff": "diff --git a/ocfweb/docs/doc.py b/ocfweb/docs/doc.py\n--- a/ocfweb/docs/doc.py\n+++ b/ocfweb/docs/doc.py\n@@ -25,3 +25,12 @@\n return self.name + '/'\n else:\n return self.category\n+\n+ @property\n+ def edit_url(self):\n+ \"\"\"Return a GitHub edit URL for this page.\"\"\"\n+ return (\n+ 'https://github.com/ocf/ocfweb/edit/master/ocfweb/docs/docs' +\n+ self.name +\n+ '.md'\n+ )\n", "issue": "Add \"edit this page\" link on docs?\nIt would link to the GitHub editor page.\n\n", "before_files": [{"content": "from collections import namedtuple\n\n\nclass Document(namedtuple('Document', ['name', 'title', 'render'])):\n\n @property\n def category(self):\n \"\"\"Return full category path of the document.\n\n For example, \"/\" or \"/staff/backend/\".\n \"\"\"\n return self.name.rsplit('/', 1)[0] + '/'\n\n @property\n def category_for_sidebar(self):\n \"\"\"Return the category to show similar pages for in the sidebar.\n\n If this page isn't at the root category, we just return this page's\n category.\n\n If this page is at the root category, we return the category rooted at\n this page (which may or may not have any pages in it).\n \"\"\"\n if self.category == '/':\n return self.name + '/'\n else:\n return self.category\n", "path": "ocfweb/docs/doc.py"}], "after_files": [{"content": "from collections import namedtuple\n\n\nclass Document(namedtuple('Document', ['name', 'title', 'render'])):\n\n @property\n def category(self):\n \"\"\"Return full category path of the document.\n\n For example, \"/\" or \"/staff/backend/\".\n \"\"\"\n return self.name.rsplit('/', 1)[0] + '/'\n\n @property\n def category_for_sidebar(self):\n \"\"\"Return the category to show similar pages for in the sidebar.\n\n If this page isn't at the root category, we just return this page's\n category.\n\n If this page is at the root category, we return the category rooted at\n this page (which may or may not have any pages in it).\n \"\"\"\n if self.category == '/':\n return self.name + '/'\n else:\n return self.category\n\n @property\n def edit_url(self):\n \"\"\"Return a GitHub edit URL for this page.\"\"\"\n return (\n 'https://github.com/ocf/ocfweb/edit/master/ocfweb/docs/docs' +\n self.name +\n '.md'\n )\n", "path": "ocfweb/docs/doc.py"}]}
| 502 | 133 |
gh_patches_debug_2911 | rasdani/github-patches | git_diff | iterative__dvc-5067 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
dvc version: does not follow symlinks
# Bug Report
## Description
This is the `dvc version` output, where it says the cache directory is `nfs4 on storage:/home` and cache type is `symlink`.
```
DVC version: 1.10.2 (pip)
---------------------------------
Platform: Python 3.8.3 on Linux-5.4.0-54-generic-x86_64-with-glibc2.10
Supports: All remotes
Cache types: symlink
Cache directory: nfs4 on storage:/home
Caches: local
Remotes: s3
Workspace directory: nfs4 on storage:/home
Repo: dvc, git
```
However, I do have a `~/.config/dvc/config` file that overrides this:
```
[core]
experiments = true
[cache]
type = "reflink,symlink,copy"
protected = true
dir = /home/jc/ssd_cache/dvc_cache
[feature]
parametrization = true
```
And the actual cache dir is `/home/jc/ssd_cache/dvc_cache` as I've specified instead of `nfs4 on storage:/home`
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/info.py`
Content:
```
1 import itertools
2 import os
3 import pathlib
4 import platform
5 import uuid
6
7 from dvc.exceptions import DvcException, NotDvcRepoError
8 from dvc.repo import Repo
9 from dvc.scm.base import SCMError
10 from dvc.system import System
11 from dvc.tree import TREES, get_tree_cls, get_tree_config
12 from dvc.utils import error_link
13 from dvc.utils.pkg import PKG
14 from dvc.version import __version__
15
16 try:
17 import psutil
18 except ImportError:
19 psutil = None
20
21 if PKG is None:
22 package = ""
23 else:
24 package = f"({PKG})"
25
26
27 def get_dvc_info():
28 info = [
29 f"DVC version: {__version__} {package}",
30 "---------------------------------",
31 f"Platform: Python {platform.python_version()} on "
32 f"{platform.platform()}",
33 f"Supports: {_get_supported_remotes()}",
34 ]
35
36 try:
37 repo = Repo()
38
39 # cache_dir might not exist yet (e.g. after `dvc init`), and we
40 # can't auto-create it, as it might cause issues if the user
41 # later decides to enable shared cache mode with
42 # `dvc config cache.shared group`.
43 if os.path.exists(repo.cache.local.cache_dir):
44 info.append(
45 "Cache types: {}".format(_get_linktype_support_info(repo))
46 )
47 if psutil:
48 fs_type = get_fs_type(repo.cache.local.cache_dir)
49 info.append(f"Cache directory: {fs_type}")
50 else:
51 info.append("Cache types: " + error_link("no-dvc-cache"))
52
53 info.append(f"Caches: {_get_caches(repo.cache)}")
54
55 info.append(f"Remotes: {_get_remotes(repo.config)}")
56
57 except NotDvcRepoError:
58 pass
59 except SCMError:
60 info.append("Repo: dvc, git (broken)")
61 else:
62 root_directory = repo.root_dir
63 if psutil:
64 fs_root = get_fs_type(os.path.abspath(root_directory))
65 info.append(f"Workspace directory: {fs_root}")
66 info.append("Repo: {}".format(_get_dvc_repo_info(repo)))
67 return "\n".join(info)
68
69
70 def _get_caches(cache):
71 caches = (
72 cache_type
73 for cache_type, cache_instance in cache.by_scheme()
74 if cache_instance
75 )
76
77 # Caches will be always non-empty including the local cache
78 return ", ".join(caches)
79
80
81 def _get_remotes(config):
82 schemes = (
83 get_tree_cls(get_tree_config(config, name=remote)).scheme
84 for remote in config["remote"]
85 )
86
87 return ", ".join(schemes) or "None"
88
89
90 def _get_linktype_support_info(repo):
91
92 links = {
93 "reflink": (System.reflink, None),
94 "hardlink": (System.hardlink, System.is_hardlink),
95 "symlink": (System.symlink, System.is_symlink),
96 }
97
98 fname = "." + str(uuid.uuid4())
99 src = os.path.join(repo.cache.local.cache_dir, fname)
100 open(src, "w").close()
101 dst = os.path.join(repo.root_dir, fname)
102
103 cache = []
104
105 for name, (link, is_link) in links.items():
106 try:
107 link(src, dst)
108 status = "supported"
109 if is_link and not is_link(dst):
110 status = "broken"
111 os.unlink(dst)
112 except DvcException:
113 status = "not supported"
114
115 if status == "supported":
116 cache.append(name)
117 os.remove(src)
118
119 return ", ".join(cache)
120
121
122 def _get_supported_remotes():
123
124 supported_remotes = []
125 for tree_cls in TREES:
126 if not tree_cls.get_missing_deps():
127 supported_remotes.append(tree_cls.scheme)
128
129 if len(supported_remotes) == len(TREES):
130 return "All remotes"
131
132 if len(supported_remotes) == 1:
133 return supported_remotes
134
135 return ", ".join(supported_remotes)
136
137
138 def get_fs_type(path):
139
140 partition = {
141 pathlib.Path(part.mountpoint): (part.fstype + " on " + part.device)
142 for part in psutil.disk_partitions(all=True)
143 }
144
145 path = pathlib.Path(path)
146
147 for parent in itertools.chain([path], path.parents):
148 if parent in partition:
149 return partition[parent]
150 return ("unknown", "none")
151
152
153 def _get_dvc_repo_info(self):
154 if self.config.get("core", {}).get("no_scm", False):
155 return "dvc (no_scm)"
156
157 if self.root_dir != self.scm.root_dir:
158 return "dvc (subdir), git"
159
160 return "dvc, git"
161
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/dvc/info.py b/dvc/info.py
--- a/dvc/info.py
+++ b/dvc/info.py
@@ -142,7 +142,8 @@
for part in psutil.disk_partitions(all=True)
}
- path = pathlib.Path(path)
+ # need to follow the symlink: https://github.com/iterative/dvc/issues/5065
+ path = pathlib.Path(path).resolve()
for parent in itertools.chain([path], path.parents):
if parent in partition:
|
{"golden_diff": "diff --git a/dvc/info.py b/dvc/info.py\n--- a/dvc/info.py\n+++ b/dvc/info.py\n@@ -142,7 +142,8 @@\n for part in psutil.disk_partitions(all=True)\n }\n \n- path = pathlib.Path(path)\n+ # need to follow the symlink: https://github.com/iterative/dvc/issues/5065\n+ path = pathlib.Path(path).resolve()\n \n for parent in itertools.chain([path], path.parents):\n if parent in partition:\n", "issue": "dvc version: does not follow symlinks\n# Bug Report\r\n\r\n## Description\r\n\r\nThis is the `dvc version` output, where it says the cache directory is `nfs4 on storage:/home` and cache type is `symlink`.\r\n\r\n```\r\nDVC version: 1.10.2 (pip)\r\n---------------------------------\r\nPlatform: Python 3.8.3 on Linux-5.4.0-54-generic-x86_64-with-glibc2.10\r\nSupports: All remotes\r\nCache types: symlink\r\nCache directory: nfs4 on storage:/home\r\nCaches: local\r\nRemotes: s3\r\nWorkspace directory: nfs4 on storage:/home\r\nRepo: dvc, git\r\n```\r\n\r\nHowever, I do have a `~/.config/dvc/config` file that overrides this:\r\n\r\n```\r\n[core]\r\n experiments = true\r\n[cache]\r\n type = \"reflink,symlink,copy\"\r\n protected = true\r\n dir = /home/jc/ssd_cache/dvc_cache\r\n[feature]\r\n parametrization = true\r\n```\r\n\r\nAnd the actual cache dir is `/home/jc/ssd_cache/dvc_cache` as I've specified instead of `nfs4 on storage:/home`\n", "before_files": [{"content": "import itertools\nimport os\nimport pathlib\nimport platform\nimport uuid\n\nfrom dvc.exceptions import DvcException, NotDvcRepoError\nfrom dvc.repo import Repo\nfrom dvc.scm.base import SCMError\nfrom dvc.system import System\nfrom dvc.tree import TREES, get_tree_cls, get_tree_config\nfrom dvc.utils import error_link\nfrom dvc.utils.pkg import PKG\nfrom dvc.version import __version__\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nif PKG is None:\n package = \"\"\nelse:\n package = f\"({PKG})\"\n\n\ndef get_dvc_info():\n info = [\n f\"DVC version: {__version__} {package}\",\n \"---------------------------------\",\n f\"Platform: Python {platform.python_version()} on \"\n f\"{platform.platform()}\",\n f\"Supports: {_get_supported_remotes()}\",\n ]\n\n try:\n repo = Repo()\n\n # cache_dir might not exist yet (e.g. 
after `dvc init`), and we\n # can't auto-create it, as it might cause issues if the user\n # later decides to enable shared cache mode with\n # `dvc config cache.shared group`.\n if os.path.exists(repo.cache.local.cache_dir):\n info.append(\n \"Cache types: {}\".format(_get_linktype_support_info(repo))\n )\n if psutil:\n fs_type = get_fs_type(repo.cache.local.cache_dir)\n info.append(f\"Cache directory: {fs_type}\")\n else:\n info.append(\"Cache types: \" + error_link(\"no-dvc-cache\"))\n\n info.append(f\"Caches: {_get_caches(repo.cache)}\")\n\n info.append(f\"Remotes: {_get_remotes(repo.config)}\")\n\n except NotDvcRepoError:\n pass\n except SCMError:\n info.append(\"Repo: dvc, git (broken)\")\n else:\n root_directory = repo.root_dir\n if psutil:\n fs_root = get_fs_type(os.path.abspath(root_directory))\n info.append(f\"Workspace directory: {fs_root}\")\n info.append(\"Repo: {}\".format(_get_dvc_repo_info(repo)))\n return \"\\n\".join(info)\n\n\ndef _get_caches(cache):\n caches = (\n cache_type\n for cache_type, cache_instance in cache.by_scheme()\n if cache_instance\n )\n\n # Caches will be always non-empty including the local cache\n return \", \".join(caches)\n\n\ndef _get_remotes(config):\n schemes = (\n get_tree_cls(get_tree_config(config, name=remote)).scheme\n for remote in config[\"remote\"]\n )\n\n return \", \".join(schemes) or \"None\"\n\n\ndef _get_linktype_support_info(repo):\n\n links = {\n \"reflink\": (System.reflink, None),\n \"hardlink\": (System.hardlink, System.is_hardlink),\n \"symlink\": (System.symlink, System.is_symlink),\n }\n\n fname = \".\" + str(uuid.uuid4())\n src = os.path.join(repo.cache.local.cache_dir, fname)\n open(src, \"w\").close()\n dst = os.path.join(repo.root_dir, fname)\n\n cache = []\n\n for name, (link, is_link) in links.items():\n try:\n link(src, dst)\n status = \"supported\"\n if is_link and not is_link(dst):\n status = \"broken\"\n os.unlink(dst)\n except DvcException:\n status = \"not supported\"\n\n if status == \"supported\":\n cache.append(name)\n os.remove(src)\n\n return \", \".join(cache)\n\n\ndef _get_supported_remotes():\n\n supported_remotes = []\n for tree_cls in TREES:\n if not tree_cls.get_missing_deps():\n supported_remotes.append(tree_cls.scheme)\n\n if len(supported_remotes) == len(TREES):\n return \"All remotes\"\n\n if len(supported_remotes) == 1:\n return supported_remotes\n\n return \", \".join(supported_remotes)\n\n\ndef get_fs_type(path):\n\n partition = {\n pathlib.Path(part.mountpoint): (part.fstype + \" on \" + part.device)\n for part in psutil.disk_partitions(all=True)\n }\n\n path = pathlib.Path(path)\n\n for parent in itertools.chain([path], path.parents):\n if parent in partition:\n return partition[parent]\n return (\"unknown\", \"none\")\n\n\ndef _get_dvc_repo_info(self):\n if self.config.get(\"core\", {}).get(\"no_scm\", False):\n return \"dvc (no_scm)\"\n\n if self.root_dir != self.scm.root_dir:\n return \"dvc (subdir), git\"\n\n return \"dvc, git\"\n", "path": "dvc/info.py"}], "after_files": [{"content": "import itertools\nimport os\nimport pathlib\nimport platform\nimport uuid\n\nfrom dvc.exceptions import DvcException, NotDvcRepoError\nfrom dvc.repo import Repo\nfrom dvc.scm.base import SCMError\nfrom dvc.system import System\nfrom dvc.tree import TREES, get_tree_cls, get_tree_config\nfrom dvc.utils import error_link\nfrom dvc.utils.pkg import PKG\nfrom dvc.version import __version__\n\ntry:\n import psutil\nexcept ImportError:\n psutil = None\n\nif PKG is None:\n package = \"\"\nelse:\n package = 
f\"({PKG})\"\n\n\ndef get_dvc_info():\n info = [\n f\"DVC version: {__version__} {package}\",\n \"---------------------------------\",\n f\"Platform: Python {platform.python_version()} on \"\n f\"{platform.platform()}\",\n f\"Supports: {_get_supported_remotes()}\",\n ]\n\n try:\n repo = Repo()\n\n # cache_dir might not exist yet (e.g. after `dvc init`), and we\n # can't auto-create it, as it might cause issues if the user\n # later decides to enable shared cache mode with\n # `dvc config cache.shared group`.\n if os.path.exists(repo.cache.local.cache_dir):\n info.append(\n \"Cache types: {}\".format(_get_linktype_support_info(repo))\n )\n if psutil:\n fs_type = get_fs_type(repo.cache.local.cache_dir)\n info.append(f\"Cache directory: {fs_type}\")\n else:\n info.append(\"Cache types: \" + error_link(\"no-dvc-cache\"))\n\n info.append(f\"Caches: {_get_caches(repo.cache)}\")\n\n info.append(f\"Remotes: {_get_remotes(repo.config)}\")\n\n except NotDvcRepoError:\n pass\n except SCMError:\n info.append(\"Repo: dvc, git (broken)\")\n else:\n root_directory = repo.root_dir\n if psutil:\n fs_root = get_fs_type(os.path.abspath(root_directory))\n info.append(f\"Workspace directory: {fs_root}\")\n info.append(\"Repo: {}\".format(_get_dvc_repo_info(repo)))\n return \"\\n\".join(info)\n\n\ndef _get_caches(cache):\n caches = (\n cache_type\n for cache_type, cache_instance in cache.by_scheme()\n if cache_instance\n )\n\n # Caches will be always non-empty including the local cache\n return \", \".join(caches)\n\n\ndef _get_remotes(config):\n schemes = (\n get_tree_cls(get_tree_config(config, name=remote)).scheme\n for remote in config[\"remote\"]\n )\n\n return \", \".join(schemes) or \"None\"\n\n\ndef _get_linktype_support_info(repo):\n\n links = {\n \"reflink\": (System.reflink, None),\n \"hardlink\": (System.hardlink, System.is_hardlink),\n \"symlink\": (System.symlink, System.is_symlink),\n }\n\n fname = \".\" + str(uuid.uuid4())\n src = os.path.join(repo.cache.local.cache_dir, fname)\n open(src, \"w\").close()\n dst = os.path.join(repo.root_dir, fname)\n\n cache = []\n\n for name, (link, is_link) in links.items():\n try:\n link(src, dst)\n status = \"supported\"\n if is_link and not is_link(dst):\n status = \"broken\"\n os.unlink(dst)\n except DvcException:\n status = \"not supported\"\n\n if status == \"supported\":\n cache.append(name)\n os.remove(src)\n\n return \", \".join(cache)\n\n\ndef _get_supported_remotes():\n\n supported_remotes = []\n for tree_cls in TREES:\n if not tree_cls.get_missing_deps():\n supported_remotes.append(tree_cls.scheme)\n\n if len(supported_remotes) == len(TREES):\n return \"All remotes\"\n\n if len(supported_remotes) == 1:\n return supported_remotes\n\n return \", \".join(supported_remotes)\n\n\ndef get_fs_type(path):\n\n partition = {\n pathlib.Path(part.mountpoint): (part.fstype + \" on \" + part.device)\n for part in psutil.disk_partitions(all=True)\n }\n\n # need to follow the symlink: https://github.com/iterative/dvc/issues/5065\n path = pathlib.Path(path).resolve()\n\n for parent in itertools.chain([path], path.parents):\n if parent in partition:\n return partition[parent]\n return (\"unknown\", \"none\")\n\n\ndef _get_dvc_repo_info(self):\n if self.config.get(\"core\", {}).get(\"no_scm\", False):\n return \"dvc (no_scm)\"\n\n if self.root_dir != self.scm.root_dir:\n return \"dvc (subdir), git\"\n\n return \"dvc, git\"\n", "path": "dvc/info.py"}]}
| 1,939 | 118 |
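A self-contained sketch of the patched `get_fs_type` helper from the dvc record above (its fix resolves symlinks before matching mount points, per the `golden_diff` in the preceding blob). This assumes `psutil` is installed and is illustrative only — it shows why `resolve()` makes a symlinked cache directory report the filesystem it actually lives on:

```python
import itertools
import pathlib

import psutil  # assumed available, as in dvc's own dependency set


def get_fs_type(path):
    # Map each mount point to a human-readable "fstype on device" string.
    partition = {
        pathlib.Path(part.mountpoint): part.fstype + " on " + part.device
        for part in psutil.disk_partitions(all=True)
    }
    # Resolving symlinks first is the fix: a cache dir symlinked from an NFS
    # home into a local SSD should match the SSD's mount point, not the home's.
    path = pathlib.Path(path).resolve()
    for parent in itertools.chain([path], path.parents):
        if parent in partition:
            return partition[parent]
    return ("unknown", "none")


print(get_fs_type("."))
```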
gh_patches_debug_15501 | rasdani/github-patches | git_diff | wagtail__wagtail-8708 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Revision model rename breaks Page revision foreign key on SQLite
### Issue Summary
#8441 renamed the PageRevision model to Revision, which included a migration with [a `RenameModel` step](https://github.com/wagtail/wagtail/blob/1f43d8ef51e455b92e42447fdc190d5ec83ec53c/wagtail/migrations/0070_rename_pagerevision_revision.py#L15-L18).
On my local machine, running against SQLite in the default configuration, this renamed the table but didn't update the foreign key from the Page model. Looking at the SQL for the migration, it starts with:
```
% ./manage.py sqlmigrate wagtailcore 0070
BEGIN;
--
-- Rename model PageRevision to Revision
--
ALTER TABLE "wagtailcore_pagerevision" RENAME TO "wagtailcore_revision";
...
```
But if I then check the `live_revision_id` foreign key on the Page model in SQLite, it hasn't been updated, and still points to the now-renamed `wagtailcore_pagerevision` table.
```
% sqlite3 db.sqlite3
SQLite version 3.32.3 2020-06-18 14:16:19
Enter ".help" for usage hints.
sqlite> PRAGMA foreign_key_list('wagtailcore_page');
...
3|0|wagtailcore_pagerevision|live_revision_id|id|NO ACTION|NO ACTION|NONE
...
```
It looks like I'm getting hit by the `ALTER TABLE RENAME` issue described in the SQLite docs [here](https://www.sqlite.org/draft/lang_altertable.html#alter_table_rename):
> Beginning with version 3.26.0, FOREIGN KEY constraints are always converted when a table is renamed, unless the [PRAGMA legacy_alter_table=ON](https://www.sqlite.org/draft/pragma.html#pragma_legacy_alter_table) setting is engaged.
My `PRAGMA`s are defined thusly (the default values, apparently, for this version of SQLite on MacOS):
```
sqlite> PRAGMA legacy_alter_table;
1
sqlite> PRAGMA foreign_keys;
0
```
I note [this commit](https://github.com/django/django/commit/063cf98d3a6839f40c423cbd845def429c5cf0ce) that just went into Django (the dev version for 4.1) that explicitly disables `legacy_alter_table`; I wonder if I am hitting some edge case that this would fix.
Wagtail seems to have only [one other instance of `RenameModel`](https://github.com/wagtail/wagtail/blob/716bf92c2dc2da2aca5e8f5aa6768b5b087cd4b0/wagtail/contrib/search_promotions/migrations/0001_initial.py#L65) for the SearchPromotion model, but I don't think we have any foreign keys pointing to that. So this might be the first time this combination has hit Wagtail.
### Steps to Reproduce
1. Start a new project with `wagtail start myproject`
2. `cd myproject`
3. `./manage.py migrate`
4. `./manage.py createsuperuser` and create an admin user.
5. `./manage.py runserver`
6. Visit http://localhost:8000/admin/pages/add/home/homepage/3/ to create a new page (you'll be asked to log in first)
7. Fill in the title field with something like "Test". Click "Save Draft". You'll get an error: "OperationalError at /admin/pages/add/home/homepage/3/, no such table: main.wagtailcore_pagerevision"
- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes
### Technical details
- Mac OS version: Big Sur 11.6.5
- SQLite3 version: 3.32.3 2020-06-18 14:16:19 02c344aceaea0d177dd42e62c8541e3cab4a26c757ba33b3a31a43ccc7d4aapl
- Python version: 3.10.4
- Django version: 4.0.5, also happens on 3.2
- Wagtail version: main (1f43d8ef51e455b92e42447fdc190d5ec83ec53c)
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `wagtail/migrations/0070_rename_pagerevision_revision.py`
Content:
```
1 # Generated by Django 4.0.3 on 2022-04-26 12:31
2
3 from django.conf import settings
4 from django.db import migrations, models
5
6
7 class Migration(migrations.Migration):
8
9 dependencies = [
10 migrations.swappable_dependency(settings.AUTH_USER_MODEL),
11 ("wagtailcore", "0069_log_entry_jsonfield"),
12 ]
13
14 operations = [
15 migrations.RenameModel(
16 old_name="PageRevision",
17 new_name="Revision",
18 ),
19 migrations.AlterModelOptions(
20 name="revision",
21 options={"verbose_name": "revision", "verbose_name_plural": "revisions"},
22 ),
23 migrations.AlterField(
24 model_name="revision",
25 name="page",
26 field=models.CharField(max_length=255, verbose_name="object id"),
27 ),
28 migrations.RenameField(
29 model_name="revision",
30 old_name="page",
31 new_name="object_id",
32 ),
33 migrations.AddField(
34 model_name="revision",
35 name="content_type",
36 field=models.ForeignKey(
37 null=True,
38 on_delete=models.CASCADE,
39 related_name="+",
40 to="contenttypes.contenttype",
41 ),
42 ),
43 migrations.AddField(
44 model_name="revision",
45 name="base_content_type",
46 field=models.ForeignKey(
47 null=True,
48 on_delete=models.CASCADE,
49 related_name="+",
50 to="contenttypes.contenttype",
51 ),
52 ),
53 migrations.AddIndex(
54 model_name="revision",
55 index=models.Index(
56 fields=["content_type", "object_id"],
57 name="content_object_idx",
58 ),
59 ),
60 migrations.AddIndex(
61 model_name="revision",
62 index=models.Index(
63 fields=["base_content_type", "object_id"],
64 name="base_content_object_idx",
65 ),
66 ),
67 ]
68
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/wagtail/migrations/0070_rename_pagerevision_revision.py b/wagtail/migrations/0070_rename_pagerevision_revision.py
--- a/wagtail/migrations/0070_rename_pagerevision_revision.py
+++ b/wagtail/migrations/0070_rename_pagerevision_revision.py
@@ -4,6 +4,12 @@
from django.db import migrations, models
+def disable_sqlite_legacy_alter_table(apps, schema_editor):
+ # Fix for https://github.com/wagtail/wagtail/issues/8635
+ if schema_editor.connection.vendor == "sqlite":
+ schema_editor.execute("PRAGMA legacy_alter_table = OFF")
+
+
class Migration(migrations.Migration):
dependencies = [
@@ -12,6 +18,10 @@
]
operations = [
+ migrations.RunPython(
+ disable_sqlite_legacy_alter_table,
+ migrations.RunPython.noop,
+ ),
migrations.RenameModel(
old_name="PageRevision",
new_name="Revision",
|
{"golden_diff": "diff --git a/wagtail/migrations/0070_rename_pagerevision_revision.py b/wagtail/migrations/0070_rename_pagerevision_revision.py\n--- a/wagtail/migrations/0070_rename_pagerevision_revision.py\n+++ b/wagtail/migrations/0070_rename_pagerevision_revision.py\n@@ -4,6 +4,12 @@\n from django.db import migrations, models\n \n \n+def disable_sqlite_legacy_alter_table(apps, schema_editor):\n+ # Fix for https://github.com/wagtail/wagtail/issues/8635\n+ if schema_editor.connection.vendor == \"sqlite\":\n+ schema_editor.execute(\"PRAGMA legacy_alter_table = OFF\")\n+\n+\n class Migration(migrations.Migration):\n \n dependencies = [\n@@ -12,6 +18,10 @@\n ]\n \n operations = [\n+ migrations.RunPython(\n+ disable_sqlite_legacy_alter_table,\n+ migrations.RunPython.noop,\n+ ),\n migrations.RenameModel(\n old_name=\"PageRevision\",\n new_name=\"Revision\",\n", "issue": "Revision model rename breaks Page revision foreign key on SQLite\n### Issue Summary\r\n\r\n#8441 renamed the PageRevision model to Revision, which included a migration with [a `RenameModel` step](https://github.com/wagtail/wagtail/blob/1f43d8ef51e455b92e42447fdc190d5ec83ec53c/wagtail/migrations/0070_rename_pagerevision_revision.py#L15-L18).\r\n\r\nOn my local machine, running against SQLite in the default configuration, this renamed the table but didn't update the foreign key from the Page model. Looking at the SQL for the migration starts with:\r\n\r\n```\r\n% ./manage.py sqlmigrate wagtailcore 0070\r\nBEGIN;\r\n--\r\n-- Rename model PageRevision to Revision\r\n--\r\nALTER TABLE \"wagtailcore_pagerevision\" RENAME TO \"wagtailcore_revision\";\r\n...\r\n```\r\n\r\nBut if I then check the `live_revision_id` foreign key on the Page model in SQLite, it hasn't been updated, and still points to the now-renamed `wagtailcore_pagerevision` table.\r\n\r\n```\r\n% sqlite3 db.sqlite3 \r\nSQLite version 3.32.3 2020-06-18 14:16:19\r\nEnter \".help\" for usage hints.\r\nsqlite> PRAGMA foreign_key_list('wagtailcore_page');\r\n...\r\n3|0|wagtailcore_pagerevision|live_revision_id|id|NO ACTION|NO ACTION|NONE\r\n...\r\n```\r\n\r\nIt looks like I'm getting hit by the `ALTER TABLE RENAME` issue described in the SQLite docs [here](https://www.sqlite.org/draft/lang_altertable.html#alter_table_rename):\r\n\r\n> Beginning with version 3.26.0, FOREIGN KEY constraints are always converted when a table is renamed, unless the [PRAGMA legacy_alter_table=ON](https://www.sqlite.org/draft/pragma.html#pragma_legacy_alter_table) setting is engaged.\r\n\r\nMy `PRAGMA`s are defined thusly (the default values, apparently, for this version of SQLite on MacOS):\r\n\r\n```\r\nsqlite> PRAGMA legacy_alter_table;\r\n1\r\nsqlite> PRAGMA foreign_keys;\r\n0\r\n```\r\n\r\nI note [this commit](https://github.com/django/django/commit/063cf98d3a6839f40c423cbd845def429c5cf0ce) that just went into Django (the dev version for 4.1) that explicitly disables `legacy_alter_table`; I wonder if I am hitting some edge case that this would fix.\r\n\r\nWagtail seems to have only [one other instance of `RenameModel`](https://github.com/wagtail/wagtail/blob/716bf92c2dc2da2aca5e8f5aa6768b5b087cd4b0/wagtail/contrib/search_promotions/migrations/0001_initial.py#L65) for the SearchPromotion model, but I don't think we have any foreign keys pointing to that. So this might be the first time this combination has hit Wagtail.\r\n\r\n### Steps to Reproduce\r\n\r\n1. Start a new project with `wagtail start myproject`\r\n2. `cd myproject`\r\n3. `./manage.py migrate`\r\n4. 
`./manage.py createsuperuser` and create an admin user.\r\n5. `./manage.py runserver`\r\n6. Vist http://localhost:8000/admin/pages/add/home/homepage/3/ to create a new page (you'll be asked to log in first)\r\n7. Fill in the title field with something like \"Test\". Click \"Save Draft\". You'll get an error: \"OperationalError at /admin/pages/add/home/homepage/3/, no such table: main.wagtailcore_pagerevision\"\r\n\r\n- I have confirmed that this issue can be reproduced as described on a fresh Wagtail project: yes\r\n\r\n### Technical details\r\n\r\n- Mac OS version: Big Sur 11.6.5\r\n- SQLite3 version: 3.32.3 2020-06-18 14:16:19 02c344aceaea0d177dd42e62c8541e3cab4a26c757ba33b3a31a43ccc7d4aapl\r\n- Python version: 3.10.4\r\n- Django version: 4.0.5, also happens on 3.2\r\n- Wagtail version: main (1f43d8ef51e455b92e42447fdc190d5ec83ec53c)\r\n\n", "before_files": [{"content": "# Generated by Django 4.0.3 on 2022-04-26 12:31\n\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n (\"wagtailcore\", \"0069_log_entry_jsonfield\"),\n ]\n\n operations = [\n migrations.RenameModel(\n old_name=\"PageRevision\",\n new_name=\"Revision\",\n ),\n migrations.AlterModelOptions(\n name=\"revision\",\n options={\"verbose_name\": \"revision\", \"verbose_name_plural\": \"revisions\"},\n ),\n migrations.AlterField(\n model_name=\"revision\",\n name=\"page\",\n field=models.CharField(max_length=255, verbose_name=\"object id\"),\n ),\n migrations.RenameField(\n model_name=\"revision\",\n old_name=\"page\",\n new_name=\"object_id\",\n ),\n migrations.AddField(\n model_name=\"revision\",\n name=\"content_type\",\n field=models.ForeignKey(\n null=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n to=\"contenttypes.contenttype\",\n ),\n ),\n migrations.AddField(\n model_name=\"revision\",\n name=\"base_content_type\",\n field=models.ForeignKey(\n null=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n to=\"contenttypes.contenttype\",\n ),\n ),\n migrations.AddIndex(\n model_name=\"revision\",\n index=models.Index(\n fields=[\"content_type\", \"object_id\"],\n name=\"content_object_idx\",\n ),\n ),\n migrations.AddIndex(\n model_name=\"revision\",\n index=models.Index(\n fields=[\"base_content_type\", \"object_id\"],\n name=\"base_content_object_idx\",\n ),\n ),\n ]\n", "path": "wagtail/migrations/0070_rename_pagerevision_revision.py"}], "after_files": [{"content": "# Generated by Django 4.0.3 on 2022-04-26 12:31\n\nfrom django.conf import settings\nfrom django.db import migrations, models\n\n\ndef disable_sqlite_legacy_alter_table(apps, schema_editor):\n # Fix for https://github.com/wagtail/wagtail/issues/8635\n if schema_editor.connection.vendor == \"sqlite\":\n schema_editor.execute(\"PRAGMA legacy_alter_table = OFF\")\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n (\"wagtailcore\", \"0069_log_entry_jsonfield\"),\n ]\n\n operations = [\n migrations.RunPython(\n disable_sqlite_legacy_alter_table,\n migrations.RunPython.noop,\n ),\n migrations.RenameModel(\n old_name=\"PageRevision\",\n new_name=\"Revision\",\n ),\n migrations.AlterModelOptions(\n name=\"revision\",\n options={\"verbose_name\": \"revision\", \"verbose_name_plural\": \"revisions\"},\n ),\n migrations.AlterField(\n model_name=\"revision\",\n name=\"page\",\n field=models.CharField(max_length=255, 
verbose_name=\"object id\"),\n ),\n migrations.RenameField(\n model_name=\"revision\",\n old_name=\"page\",\n new_name=\"object_id\",\n ),\n migrations.AddField(\n model_name=\"revision\",\n name=\"content_type\",\n field=models.ForeignKey(\n null=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n to=\"contenttypes.contenttype\",\n ),\n ),\n migrations.AddField(\n model_name=\"revision\",\n name=\"base_content_type\",\n field=models.ForeignKey(\n null=True,\n on_delete=models.CASCADE,\n related_name=\"+\",\n to=\"contenttypes.contenttype\",\n ),\n ),\n migrations.AddIndex(\n model_name=\"revision\",\n index=models.Index(\n fields=[\"content_type\", \"object_id\"],\n name=\"content_object_idx\",\n ),\n ),\n migrations.AddIndex(\n model_name=\"revision\",\n index=models.Index(\n fields=[\"base_content_type\", \"object_id\"],\n name=\"base_content_object_idx\",\n ),\n ),\n ]\n", "path": "wagtail/migrations/0070_rename_pagerevision_revision.py"}]}
| 1,827 | 239 |
gh_patches_debug_11462 | rasdani/github-patches | git_diff | sublimelsp__LSP-772 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Completion inputs label instead of textEdit newText
scalameta/metals#1031 was recently merged, which adds an "implement all members" completion option. However, in Sublime it doesn't show up in the same position in the completion list as in other editors; it seems to be triggered by `e` for some reason. Apart from that, if you do decide to use that completion, it inserts the `label` instead of the textEdit `newText`.
I'm on MacOS using the [Metals Language Server](https://github.com/scalameta/metals) with this SNAPSHOT `0.7.6+224-b3ea857f-SNAPSHOT`
Here is a gif illustrating what I'm talking about

And here is the snippet of lsp json that shows the completion item
```
[Trace - 08:54:53 AM] Received request 'completionItem/resolve - (30)'
Params: {
"label": "Implement all members",
"kind": 12,
"sortText": "00002",
"filterText": "e",
"insertTextFormat": 2,
"textEdit": {
"range": {
"start": {
"line": 9,
"character": 3
},
"end": {
"line": 9,
"character": 4
}
},
"newText": "def foo: Int \u003d ${0:???}\n def boo: Int \u003d ${0:???}"
},
"data": {
"target": "file:/Users/ckipp/Documents/scala-workspace/test-project/?id\u003droot",
"symbol": "local6"
}
}
[Trace - 08:54:53 AM] Sending response 'completionItem/resolve - (30)'. Processing request took 1ms
Result: {
"label": "Implement all members",
"kind": 12,
"sortText": "00002",
"filterText": "e",
"insertTextFormat": 2,
"textEdit": {
"range": {
"start": {
"line": 9,
"character": 3
},
"end": {
"line": 9,
"character": 4
}
},
"newText": "def foo: Int \u003d ${0:???}\n def boo: Int \u003d ${0:???}"
},
"data": {
"target": "file:/Users/ckipp/Documents/scala-workspace/test-project/?id\u003droot",
"symbol": "local6"
}
}
```
If I can provide any more details, just let me know!
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `plugin/core/completion.py`
Content:
```
1 from .protocol import CompletionItemKind, Range
2 from .types import Settings
3 from .logging import debug
4 try:
5 from typing import Tuple, Optional, Dict, List, Union
6 assert Tuple and Optional and Dict and List and Union and Settings
7 except ImportError:
8 pass
9
10
11 completion_item_kind_names = {v: k for k, v in CompletionItemKind.__dict__.items()}
12
13
14 def get_completion_hint(item: dict, settings: 'Settings') -> 'Optional[str]':
15 # choose hint based on availability and user preference
16 hint = None
17 if settings.completion_hint_type == "auto":
18 hint = item.get("detail")
19 if not hint:
20 kind = item.get("kind")
21 if kind:
22 hint = completion_item_kind_names[kind]
23 elif settings.completion_hint_type == "detail":
24 hint = item.get("detail")
25 elif settings.completion_hint_type == "kind":
26 kind = item.get("kind")
27 if kind:
28 hint = completion_item_kind_names.get(kind)
29 return hint
30
31
32 def format_completion(item: dict, word_col: int, settings: 'Settings') -> 'Tuple[str, str]':
33 # Sublime handles snippets automatically, so we don't have to care about insertTextFormat.
34 if settings.prefer_label_over_filter_text:
35 trigger = item["label"]
36 else:
37 trigger = item.get("filterText") or item["label"]
38
39 hint = get_completion_hint(item, settings)
40
41 # label is an alternative for insertText if neither textEdit nor insertText is provided
42 replacement = text_edit_text(item, word_col) or item.get("insertText") or trigger
43
44 if replacement[0] != trigger[0]:
45 # fix some common cases when server sends different start on label and replacement.
46 if replacement[0] == '$':
47 trigger = '$' + trigger # add missing $
48 elif replacement[0] == '-':
49 trigger = '-' + trigger # add missing -
50 elif trigger[0] == ':':
51 replacement = ':' + replacement # add missing :
52 elif trigger[0] == '$':
53 trigger = trigger[1:] # remove leading $
54 elif trigger[0] == ' ' or trigger[0] == '•':
55 trigger = trigger[1:] # remove clangd insertion indicator
56 else:
57 debug("replacement prefix does not match trigger!")
58 replacement = item.get("insertText") or trigger
59
60 if len(replacement) > 0 and replacement[0] == '$': # sublime needs leading '$' escaped.
61 replacement = '\\$' + replacement[1:]
62 # only return trigger with a hint if available
63 return "\t ".join((trigger, hint)) if hint else trigger, replacement
64
65
66 def text_edit_text(item: dict, word_col: int) -> 'Optional[str]':
67 text_edit = item.get('textEdit')
68 if text_edit:
69 edit_range, edit_text = text_edit.get("range"), text_edit.get("newText")
70 if edit_range and edit_text:
71 edit_range = Range.from_lsp(edit_range)
72
73 # debug('textEdit from col {}, {} applied at col {}'.format(
74 # edit_range.start.col, edit_range.end.col, word_col))
75
76 if edit_range.start.col <= word_col:
77 # if edit starts at current word, we can use it.
78 # if edit starts before current word, use the whole thing and we'll fix it up later.
79 return edit_text
80
81 return None
82
83
84 def parse_completion_response(response: 'Optional[Union[Dict,List]]') -> 'Tuple[List[Dict], bool]':
85 items = [] # type: List[Dict]
86 is_incomplete = False
87 if isinstance(response, dict):
88 items = response["items"] or []
89 is_incomplete = response.get("isIncomplete", False)
90 elif isinstance(response, list):
91 items = response
92 items = sorted(items, key=lambda item: item.get("sortText") or item["label"])
93 return items, is_incomplete
94
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/plugin/core/completion.py b/plugin/core/completion.py
--- a/plugin/core/completion.py
+++ b/plugin/core/completion.py
@@ -54,8 +54,7 @@
elif trigger[0] == ' ' or trigger[0] == '•':
trigger = trigger[1:] # remove clangd insertion indicator
else:
- debug("replacement prefix does not match trigger!")
- replacement = item.get("insertText") or trigger
+ debug("WARNING: Replacement prefix does not match trigger '{}'".format(trigger))
if len(replacement) > 0 and replacement[0] == '$': # sublime needs leading '$' escaped.
replacement = '\\$' + replacement[1:]
|
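The patch above keeps the `textEdit` replacement even when its first character doesn't match the trigger, only logging a warning instead of falling back to `insertText`/trigger. A simplified, self-contained sketch of that decision — the helper name and trimmed-down logic are stand-ins, not the plugin's real API; the completion item is the one from the issue:

```python
def pick_replacement(item, word_col):
    # Trigger defaults to filterText, falling back to the label.
    trigger = item.get("filterText") or item["label"]
    text_edit = item.get("textEdit") or {}
    start_col = text_edit.get("range", {}).get("start", {}).get("character")
    edit_text = text_edit.get("newText") if start_col is not None and start_col <= word_col else None
    replacement = edit_text or item.get("insertText") or trigger
    # Post-fix behaviour: a prefix mismatch ('e' vs 'd...') is only logged,
    # so the snippet from textEdit.newText is what gets inserted.
    return trigger, replacement


metals_item = {
    "label": "Implement all members",
    "filterText": "e",
    "insertTextFormat": 2,
    "textEdit": {
        "range": {"start": {"line": 9, "character": 3},
                  "end": {"line": 9, "character": 4}},
        "newText": "def foo: Int = ${0:???}\n  def boo: Int = ${0:???}",
    },
}
print(pick_replacement(metals_item, word_col=3))
```

Running it prints the trigger `'e'` alongside the multi-line snippet, which is roughly what the fixed handler now hands to Sublime instead of the bare label.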
{"golden_diff": "diff --git a/plugin/core/completion.py b/plugin/core/completion.py\n--- a/plugin/core/completion.py\n+++ b/plugin/core/completion.py\n@@ -54,8 +54,7 @@\n elif trigger[0] == ' ' or trigger[0] == '\u2022':\n trigger = trigger[1:] # remove clangd insertion indicator\n else:\n- debug(\"replacement prefix does not match trigger!\")\n- replacement = item.get(\"insertText\") or trigger\n+ debug(\"WARNING: Replacement prefix does not match trigger '{}'\".format(trigger))\n \n if len(replacement) > 0 and replacement[0] == '$': # sublime needs leading '$' escaped.\n replacement = '\\\\$' + replacement[1:]\n", "issue": "Completion inputs label instead of textEdit newText\nscalameta/metals#1031 was recently merged which adds an \"implement all members\" completion option. However, in Sublime it seems to not show up in the same order in the completions as the other editors. It seems to be triggered by e for some reason. Apart from that, if you do decide to use that completion, the completion seems to instead of implementing the `newText` it implements the `label`.\r\n\r\nI'm on MacOS using the [Metals Language Server](https://github.com/scalameta/metals) with this SNAPSHOT `0.7.6+224-b3ea857f-SNAPSHOT`\r\n\r\nHere is a gif illustrating what I'm talking about\r\n\r\n\r\n\r\nAnd here is the snippet of lsp json that shows the completion item\r\n\r\n```\r\n[Trace - 08:54:53 AM] Received request 'completionItem/resolve - (30)'\r\nParams: {\r\n \"label\": \"Implement all members\",\r\n \"kind\": 12,\r\n \"sortText\": \"00002\",\r\n \"filterText\": \"e\",\r\n \"insertTextFormat\": 2,\r\n \"textEdit\": {\r\n \"range\": {\r\n \"start\": {\r\n \"line\": 9,\r\n \"character\": 3\r\n },\r\n \"end\": {\r\n \"line\": 9,\r\n \"character\": 4\r\n }\r\n },\r\n \"newText\": \"def foo: Int \\u003d ${0:???}\\n def boo: Int \\u003d ${0:???}\"\r\n },\r\n \"data\": {\r\n \"target\": \"file:/Users/ckipp/Documents/scala-workspace/test-project/?id\\u003droot\",\r\n \"symbol\": \"local6\"\r\n }\r\n}\r\n\r\n\r\n[Trace - 08:54:53 AM] Sending response 'completionItem/resolve - (30)'. 
Processing request took 1ms\r\nResult: {\r\n \"label\": \"Implement all members\",\r\n \"kind\": 12,\r\n \"sortText\": \"00002\",\r\n \"filterText\": \"e\",\r\n \"insertTextFormat\": 2,\r\n \"textEdit\": {\r\n \"range\": {\r\n \"start\": {\r\n \"line\": 9,\r\n \"character\": 3\r\n },\r\n \"end\": {\r\n \"line\": 9,\r\n \"character\": 4\r\n }\r\n },\r\n \"newText\": \"def foo: Int \\u003d ${0:???}\\n def boo: Int \\u003d ${0:???}\"\r\n },\r\n \"data\": {\r\n \"target\": \"file:/Users/ckipp/Documents/scala-workspace/test-project/?id\\u003droot\",\r\n \"symbol\": \"local6\"\r\n }\r\n}\r\n```\r\n\r\nIf I can provide any more details, just let me know!\n", "before_files": [{"content": "from .protocol import CompletionItemKind, Range\nfrom .types import Settings\nfrom .logging import debug\ntry:\n from typing import Tuple, Optional, Dict, List, Union\n assert Tuple and Optional and Dict and List and Union and Settings\nexcept ImportError:\n pass\n\n\ncompletion_item_kind_names = {v: k for k, v in CompletionItemKind.__dict__.items()}\n\n\ndef get_completion_hint(item: dict, settings: 'Settings') -> 'Optional[str]':\n # choose hint based on availability and user preference\n hint = None\n if settings.completion_hint_type == \"auto\":\n hint = item.get(\"detail\")\n if not hint:\n kind = item.get(\"kind\")\n if kind:\n hint = completion_item_kind_names[kind]\n elif settings.completion_hint_type == \"detail\":\n hint = item.get(\"detail\")\n elif settings.completion_hint_type == \"kind\":\n kind = item.get(\"kind\")\n if kind:\n hint = completion_item_kind_names.get(kind)\n return hint\n\n\ndef format_completion(item: dict, word_col: int, settings: 'Settings') -> 'Tuple[str, str]':\n # Sublime handles snippets automatically, so we don't have to care about insertTextFormat.\n if settings.prefer_label_over_filter_text:\n trigger = item[\"label\"]\n else:\n trigger = item.get(\"filterText\") or item[\"label\"]\n\n hint = get_completion_hint(item, settings)\n\n # label is an alternative for insertText if neither textEdit nor insertText is provided\n replacement = text_edit_text(item, word_col) or item.get(\"insertText\") or trigger\n\n if replacement[0] != trigger[0]:\n # fix some common cases when server sends different start on label and replacement.\n if replacement[0] == '$':\n trigger = '$' + trigger # add missing $\n elif replacement[0] == '-':\n trigger = '-' + trigger # add missing -\n elif trigger[0] == ':':\n replacement = ':' + replacement # add missing :\n elif trigger[0] == '$':\n trigger = trigger[1:] # remove leading $\n elif trigger[0] == ' ' or trigger[0] == '\u2022':\n trigger = trigger[1:] # remove clangd insertion indicator\n else:\n debug(\"replacement prefix does not match trigger!\")\n replacement = item.get(\"insertText\") or trigger\n\n if len(replacement) > 0 and replacement[0] == '$': # sublime needs leading '$' escaped.\n replacement = '\\\\$' + replacement[1:]\n # only return trigger with a hint if available\n return \"\\t \".join((trigger, hint)) if hint else trigger, replacement\n\n\ndef text_edit_text(item: dict, word_col: int) -> 'Optional[str]':\n text_edit = item.get('textEdit')\n if text_edit:\n edit_range, edit_text = text_edit.get(\"range\"), text_edit.get(\"newText\")\n if edit_range and edit_text:\n edit_range = Range.from_lsp(edit_range)\n\n # debug('textEdit from col {}, {} applied at col {}'.format(\n # edit_range.start.col, edit_range.end.col, word_col))\n\n if edit_range.start.col <= word_col:\n # if edit starts at current word, we can use it.\n # if edit 
starts before current word, use the whole thing and we'll fix it up later.\n return edit_text\n\n return None\n\n\ndef parse_completion_response(response: 'Optional[Union[Dict,List]]') -> 'Tuple[List[Dict], bool]':\n items = [] # type: List[Dict]\n is_incomplete = False\n if isinstance(response, dict):\n items = response[\"items\"] or []\n is_incomplete = response.get(\"isIncomplete\", False)\n elif isinstance(response, list):\n items = response\n items = sorted(items, key=lambda item: item.get(\"sortText\") or item[\"label\"])\n return items, is_incomplete\n", "path": "plugin/core/completion.py"}], "after_files": [{"content": "from .protocol import CompletionItemKind, Range\nfrom .types import Settings\nfrom .logging import debug\ntry:\n from typing import Tuple, Optional, Dict, List, Union\n assert Tuple and Optional and Dict and List and Union and Settings\nexcept ImportError:\n pass\n\n\ncompletion_item_kind_names = {v: k for k, v in CompletionItemKind.__dict__.items()}\n\n\ndef get_completion_hint(item: dict, settings: 'Settings') -> 'Optional[str]':\n # choose hint based on availability and user preference\n hint = None\n if settings.completion_hint_type == \"auto\":\n hint = item.get(\"detail\")\n if not hint:\n kind = item.get(\"kind\")\n if kind:\n hint = completion_item_kind_names[kind]\n elif settings.completion_hint_type == \"detail\":\n hint = item.get(\"detail\")\n elif settings.completion_hint_type == \"kind\":\n kind = item.get(\"kind\")\n if kind:\n hint = completion_item_kind_names.get(kind)\n return hint\n\n\ndef format_completion(item: dict, word_col: int, settings: 'Settings') -> 'Tuple[str, str]':\n # Sublime handles snippets automatically, so we don't have to care about insertTextFormat.\n if settings.prefer_label_over_filter_text:\n trigger = item[\"label\"]\n else:\n trigger = item.get(\"filterText\") or item[\"label\"]\n\n hint = get_completion_hint(item, settings)\n\n # label is an alternative for insertText if neither textEdit nor insertText is provided\n replacement = text_edit_text(item, word_col) or item.get(\"insertText\") or trigger\n\n if replacement[0] != trigger[0]:\n # fix some common cases when server sends different start on label and replacement.\n if replacement[0] == '$':\n trigger = '$' + trigger # add missing $\n elif replacement[0] == '-':\n trigger = '-' + trigger # add missing -\n elif trigger[0] == ':':\n replacement = ':' + replacement # add missing :\n elif trigger[0] == '$':\n trigger = trigger[1:] # remove leading $\n elif trigger[0] == ' ' or trigger[0] == '\u2022':\n trigger = trigger[1:] # remove clangd insertion indicator\n else:\n debug(\"WARNING: Replacement prefix does not match trigger '{}'\".format(trigger))\n\n if len(replacement) > 0 and replacement[0] == '$': # sublime needs leading '$' escaped.\n replacement = '\\\\$' + replacement[1:]\n # only return trigger with a hint if available\n return \"\\t \".join((trigger, hint)) if hint else trigger, replacement\n\n\ndef text_edit_text(item: dict, word_col: int) -> 'Optional[str]':\n text_edit = item.get('textEdit')\n if text_edit:\n edit_range, edit_text = text_edit.get(\"range\"), text_edit.get(\"newText\")\n if edit_range and edit_text:\n edit_range = Range.from_lsp(edit_range)\n\n # debug('textEdit from col {}, {} applied at col {}'.format(\n # edit_range.start.col, edit_range.end.col, word_col))\n\n if edit_range.start.col <= word_col:\n # if edit starts at current word, we can use it.\n # if edit starts before current word, use the whole thing and we'll fix it up later.\n 
return edit_text\n\n return None\n\n\ndef parse_completion_response(response: 'Optional[Union[Dict,List]]') -> 'Tuple[List[Dict], bool]':\n items = [] # type: List[Dict]\n is_incomplete = False\n if isinstance(response, dict):\n items = response[\"items\"] or []\n is_incomplete = response.get(\"isIncomplete\", False)\n elif isinstance(response, list):\n items = response\n items = sorted(items, key=lambda item: item.get(\"sortText\") or item[\"label\"])\n return items, is_incomplete\n", "path": "plugin/core/completion.py"}]}
| 1,998 | 159 |
gh_patches_debug_29846 | rasdani/github-patches | git_diff | googleapis__google-cloud-python-5569 |
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
google.cloud.logging.handlers: send messages to stderr
Would it be OK if we did the print at https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/logging/google/cloud/logging/handlers/transports/background_thread.py#L222
to stderr instead, so it doesn't disturb the output of the software?
Thanks
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `logging/google/cloud/logging/handlers/transports/background_thread.py`
Content:
```
1 # Copyright 2016 Google LLC
2 #
3 # Licensed under the Apache License, Version 2.0 (the "License");
4 # you may not use this file except in compliance with the License.
5 # You may obtain a copy of the License at
6 #
7 # http://www.apache.org/licenses/LICENSE-2.0
8 #
9 # Unless required by applicable law or agreed to in writing, software
10 # distributed under the License is distributed on an "AS IS" BASIS,
11 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 # See the License for the specific language governing permissions and
13 # limitations under the License.
14
15 """Transport for Python logging handler
16
17 Uses a background worker to log to Stackdriver Logging asynchronously.
18 """
19
20 from __future__ import print_function
21
22 import atexit
23 import logging
24 import threading
25 import time
26
27 from six.moves import range
28 from six.moves import queue
29
30 from google.cloud.logging.handlers.transports.base import Transport
31
32 _DEFAULT_GRACE_PERIOD = 5.0 # Seconds
33 _DEFAULT_MAX_BATCH_SIZE = 10
34 _DEFAULT_MAX_LATENCY = 0 # Seconds
35 _WORKER_THREAD_NAME = 'google.cloud.logging.Worker'
36 _WORKER_TERMINATOR = object()
37 _LOGGER = logging.getLogger(__name__)
38
39
40 def _get_many(queue_, max_items=None, max_latency=0):
41 """Get multiple items from a Queue.
42
43 Gets at least one (blocking) and at most ``max_items`` items
44 (non-blocking) from a given Queue. Does not mark the items as done.
45
46 :type queue_: :class:`~queue.Queue`
47 :param queue_: The Queue to get items from.
48
49 :type max_items: int
50 :param max_items: The maximum number of items to get. If ``None``, then all
51 available items in the queue are returned.
52
53 :type max_latency: float
54 :param max_latency: The maximum number of seconds to wait for more than one
55 item from a queue. This number includes the time required to retrieve
56 the first item.
57
58 :rtype: Sequence
59 :returns: A sequence of items retrieved from the queue.
60 """
61 start = time.time()
62 # Always return at least one item.
63 items = [queue_.get()]
64 while max_items is None or len(items) < max_items:
65 try:
66 elapsed = time.time() - start
67 timeout = max(0, max_latency - elapsed)
68 items.append(queue_.get(timeout=timeout))
69 except queue.Empty:
70 break
71 return items
72
73
74 class _Worker(object):
75 """A background thread that writes batches of log entries.
76
77 :type cloud_logger: :class:`~google.cloud.logging.logger.Logger`
78 :param cloud_logger: The logger to send entries to.
79
80 :type grace_period: float
81 :param grace_period: The amount of time to wait for pending logs to
82 be submitted when the process is shutting down.
83
84 :type max_batch_size: int
85 :param max_batch_size: The maximum number of items to send at a time
86 in the background thread.
87
88 :type max_latency: float
89 :param max_latency: The amount of time to wait for new logs before
90 sending a new batch. It is strongly recommended to keep this smaller
91 than the grace_period. This means this is effectively the longest
92 amount of time the background thread will hold onto log entries
93 before sending them to the server.
94 """
95
96 def __init__(self, cloud_logger, grace_period=_DEFAULT_GRACE_PERIOD,
97 max_batch_size=_DEFAULT_MAX_BATCH_SIZE,
98 max_latency=_DEFAULT_MAX_LATENCY):
99 self._cloud_logger = cloud_logger
100 self._grace_period = grace_period
101 self._max_batch_size = max_batch_size
102 self._max_latency = max_latency
103 self._queue = queue.Queue(0)
104 self._operational_lock = threading.Lock()
105 self._thread = None
106
107 @property
108 def is_alive(self):
109 """Returns True is the background thread is running."""
110 return self._thread is not None and self._thread.is_alive()
111
112 def _safely_commit_batch(self, batch):
113 total_logs = len(batch.entries)
114
115 try:
116 if total_logs > 0:
117 batch.commit()
118 _LOGGER.debug('Submitted %d logs', total_logs)
119 except Exception:
120 _LOGGER.error(
121 'Failed to submit %d logs.', total_logs, exc_info=True)
122
123 def _thread_main(self):
124 """The entry point for the worker thread.
125
126 Pulls pending log entries off the queue and writes them in batches to
127 the Cloud Logger.
128 """
129 _LOGGER.debug('Background thread started.')
130
131 quit_ = False
132 while True:
133 batch = self._cloud_logger.batch()
134 items = _get_many(
135 self._queue, max_items=self._max_batch_size,
136 max_latency=self._max_latency)
137
138 for item in items:
139 if item is _WORKER_TERMINATOR:
140 quit_ = True
141 # Continue processing items, don't break, try to process
142 # all items we got back before quitting.
143 else:
144 batch.log_struct(**item)
145
146 self._safely_commit_batch(batch)
147
148 for _ in range(len(items)):
149 self._queue.task_done()
150
151 if quit_:
152 break
153
154 _LOGGER.debug('Background thread exited gracefully.')
155
156 def start(self):
157 """Starts the background thread.
158
159 Additionally, this registers a handler for process exit to attempt
160 to send any pending log entries before shutdown.
161 """
162 with self._operational_lock:
163 if self.is_alive:
164 return
165
166 self._thread = threading.Thread(
167 target=self._thread_main,
168 name=_WORKER_THREAD_NAME)
169 self._thread.daemon = True
170 self._thread.start()
171 atexit.register(self._main_thread_terminated)
172
173 def stop(self, grace_period=None):
174 """Signals the background thread to stop.
175
176 This does not terminate the background thread. It simply queues the
177 stop signal. If the main process exits before the background thread
178 processes the stop signal, it will be terminated without finishing
179 work. The ``grace_period`` parameter will give the background
180 thread some time to finish processing before this function returns.
181
182 :type grace_period: float
183 :param grace_period: If specified, this method will block up to this
184 many seconds to allow the background thread to finish work before
185 returning.
186
187 :rtype: bool
188 :returns: True if the thread terminated. False if the thread is still
189 running.
190 """
191 if not self.is_alive:
192 return True
193
194 with self._operational_lock:
195 self._queue.put_nowait(_WORKER_TERMINATOR)
196
197 if grace_period is not None:
198 print('Waiting up to %d seconds.' % (grace_period,))
199
200 self._thread.join(timeout=grace_period)
201
202 # Check this before disowning the thread, because after we disown
203 # the thread is_alive will be False regardless of if the thread
204 # exited or not.
205 success = not self.is_alive
206
207 self._thread = None
208
209 return success
210
211 def _main_thread_terminated(self):
212 """Callback that attempts to send pending logs before termination."""
213 if not self.is_alive:
214 return
215
216 if not self._queue.empty():
217 print(
218 'Program shutting down, attempting to send %d queued log '
219 'entries to Stackdriver Logging...' % (self._queue.qsize(),))
220
221 if self.stop(self._grace_period):
222 print('Sent all pending logs.')
223 else:
224 print('Failed to send %d pending logs.' % (self._queue.qsize(),))
225
226 def enqueue(self, record, message, resource=None, labels=None):
227 """Queues a log entry to be written by the background thread.
228
229 :type record: :class:`logging.LogRecord`
230 :param record: Python log record that the handler was called with.
231
232 :type message: str
233 :param message: The message from the ``LogRecord`` after being
234 formatted by the associated log formatters.
235
236 :type resource: :class:`~google.cloud.logging.resource.Resource`
237 :param resource: (Optional) Monitored resource of the entry
238
239 :type labels: dict
240 :param labels: (Optional) Mapping of labels for the entry.
241 """
242 self._queue.put_nowait({
243 'info': {
244 'message': message,
245 'python_logger': record.name,
246 },
247 'severity': record.levelname,
248 'resource': resource,
249 'labels': labels,
250 })
251
252 def flush(self):
253 """Submit any pending log records."""
254 self._queue.join()
255
256
257 class BackgroundThreadTransport(Transport):
258 """Asynchronous transport that uses a background thread.
259
260 :type client: :class:`~google.cloud.logging.client.Client`
261 :param client: The Logging client.
262
263 :type name: str
264 :param name: the name of the logger.
265
266 :type grace_period: float
267 :param grace_period: The amount of time to wait for pending logs to
268 be submitted when the process is shutting down.
269
270 :type batch_size: int
271 :param batch_size: The maximum number of items to send at a time in the
272 background thread.
273
274 :type max_latency: float
275 :param max_latency: The amount of time to wait for new logs before
276 sending a new batch. It is strongly recommended to keep this smaller
277 than the grace_period. This means this is effectively the longest
278 amount of time the background thread will hold onto log entries
279 before sending them to the server.
280 """
281
282 def __init__(self, client, name, grace_period=_DEFAULT_GRACE_PERIOD,
283 batch_size=_DEFAULT_MAX_BATCH_SIZE,
284 max_latency=_DEFAULT_MAX_LATENCY):
285 self.client = client
286 logger = self.client.logger(name)
287 self.worker = _Worker(logger,
288 grace_period=grace_period,
289 max_batch_size=batch_size,
290 max_latency=max_latency)
291 self.worker.start()
292
293 def send(self, record, message, resource=None, labels=None):
294 """Overrides Transport.send().
295
296 :type record: :class:`logging.LogRecord`
297 :param record: Python log record that the handler was called with.
298
299 :type message: str
300 :param message: The message from the ``LogRecord`` after being
301 formatted by the associated log formatters.
302
303 :type resource: :class:`~google.cloud.logging.resource.Resource`
304 :param resource: (Optional) Monitored resource of the entry.
305
306 :type labels: dict
307 :param labels: (Optional) Mapping of labels for the entry.
308 """
309 self.worker.enqueue(record, message, resource=resource, labels=labels)
310
311 def flush(self):
312 """Submit any pending log records."""
313 self.worker.flush()
314
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/logging/google/cloud/logging/handlers/transports/background_thread.py b/logging/google/cloud/logging/handlers/transports/background_thread.py
--- a/logging/google/cloud/logging/handlers/transports/background_thread.py
+++ b/logging/google/cloud/logging/handlers/transports/background_thread.py
@@ -21,6 +21,7 @@
import atexit
import logging
+import sys
import threading
import time
@@ -195,7 +196,9 @@
self._queue.put_nowait(_WORKER_TERMINATOR)
if grace_period is not None:
- print('Waiting up to %d seconds.' % (grace_period,))
+ print(
+ 'Waiting up to %d seconds.' % (grace_period,),
+ file=sys.stderr)
self._thread.join(timeout=grace_period)
@@ -216,12 +219,15 @@
if not self._queue.empty():
print(
'Program shutting down, attempting to send %d queued log '
- 'entries to Stackdriver Logging...' % (self._queue.qsize(),))
+ 'entries to Stackdriver Logging...' % (self._queue.qsize(),),
+ file=sys.stderr)
if self.stop(self._grace_period):
- print('Sent all pending logs.')
+ print('Sent all pending logs.', file=sys.stderr)
else:
- print('Failed to send %d pending logs.' % (self._queue.qsize(),))
+ print(
+ 'Failed to send %d pending logs.' % (self._queue.qsize(),),
+ file=sys.stderr)
def enqueue(self, record, message, resource=None, labels=None):
"""Queues a log entry to be written by the background thread.
|
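The patch above routes the transport's shutdown notices through `sys.stderr`. A tiny sketch of the general pattern, with a hypothetical helper name, for anyone mirroring it in their own handlers — diagnostics go to stderr so the program's stdout stays untouched:

```python
from __future__ import print_function

import sys


def announce_shutdown(pending_entries, grace_period):
    # Keep stdout clean for the application; send transport chatter to stderr.
    print('Waiting up to %d seconds.' % (grace_period,), file=sys.stderr)
    print('Attempting to send %d queued log entries to Stackdriver Logging...'
          % (pending_entries,), file=sys.stderr)


announce_shutdown(pending_entries=3, grace_period=5.0)
```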
{"golden_diff": "diff --git a/logging/google/cloud/logging/handlers/transports/background_thread.py b/logging/google/cloud/logging/handlers/transports/background_thread.py\n--- a/logging/google/cloud/logging/handlers/transports/background_thread.py\n+++ b/logging/google/cloud/logging/handlers/transports/background_thread.py\n@@ -21,6 +21,7 @@\n \n import atexit\n import logging\n+import sys\n import threading\n import time\n \n@@ -195,7 +196,9 @@\n self._queue.put_nowait(_WORKER_TERMINATOR)\n \n if grace_period is not None:\n- print('Waiting up to %d seconds.' % (grace_period,))\n+ print(\n+ 'Waiting up to %d seconds.' % (grace_period,),\n+ file=sys.stderr)\n \n self._thread.join(timeout=grace_period)\n \n@@ -216,12 +219,15 @@\n if not self._queue.empty():\n print(\n 'Program shutting down, attempting to send %d queued log '\n- 'entries to Stackdriver Logging...' % (self._queue.qsize(),))\n+ 'entries to Stackdriver Logging...' % (self._queue.qsize(),),\n+ file=sys.stderr)\n \n if self.stop(self._grace_period):\n- print('Sent all pending logs.')\n+ print('Sent all pending logs.', file=sys.stderr)\n else:\n- print('Failed to send %d pending logs.' % (self._queue.qsize(),))\n+ print(\n+ 'Failed to send %d pending logs.' % (self._queue.qsize(),),\n+ file=sys.stderr)\n \n def enqueue(self, record, message, resource=None, labels=None):\n \"\"\"Queues a log entry to be written by the background thread.\n", "issue": "google.cloud.logging.handlers: send messages to stderr\nWould it be ok if we do the print at https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/logging/google/cloud/logging/handlers/transports/background_thread.py#L222\r\nto stderr instead? So it doesn't disturb the output of the software?\r\n\r\nThanks\n", "before_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transport for Python logging handler\n\nUses a background worker to log to Stackdriver Logging asynchronously.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport atexit\nimport logging\nimport threading\nimport time\n\nfrom six.moves import range\nfrom six.moves import queue\n\nfrom google.cloud.logging.handlers.transports.base import Transport\n\n_DEFAULT_GRACE_PERIOD = 5.0 # Seconds\n_DEFAULT_MAX_BATCH_SIZE = 10\n_DEFAULT_MAX_LATENCY = 0 # Seconds\n_WORKER_THREAD_NAME = 'google.cloud.logging.Worker'\n_WORKER_TERMINATOR = object()\n_LOGGER = logging.getLogger(__name__)\n\n\ndef _get_many(queue_, max_items=None, max_latency=0):\n \"\"\"Get multiple items from a Queue.\n\n Gets at least one (blocking) and at most ``max_items`` items\n (non-blocking) from a given Queue. Does not mark the items as done.\n\n :type queue_: :class:`~queue.Queue`\n :param queue_: The Queue to get items from.\n\n :type max_items: int\n :param max_items: The maximum number of items to get. 
If ``None``, then all\n available items in the queue are returned.\n\n :type max_latency: float\n :param max_latency: The maximum number of seconds to wait for more than one\n item from a queue. This number includes the time required to retrieve\n the first item.\n\n :rtype: Sequence\n :returns: A sequence of items retrieved from the queue.\n \"\"\"\n start = time.time()\n # Always return at least one item.\n items = [queue_.get()]\n while max_items is None or len(items) < max_items:\n try:\n elapsed = time.time() - start\n timeout = max(0, max_latency - elapsed)\n items.append(queue_.get(timeout=timeout))\n except queue.Empty:\n break\n return items\n\n\nclass _Worker(object):\n \"\"\"A background thread that writes batches of log entries.\n\n :type cloud_logger: :class:`~google.cloud.logging.logger.Logger`\n :param cloud_logger: The logger to send entries to.\n\n :type grace_period: float\n :param grace_period: The amount of time to wait for pending logs to\n be submitted when the process is shutting down.\n\n :type max_batch_size: int\n :param max_batch_size: The maximum number of items to send at a time\n in the background thread.\n\n :type max_latency: float\n :param max_latency: The amount of time to wait for new logs before\n sending a new batch. It is strongly recommended to keep this smaller\n than the grace_period. This means this is effectively the longest\n amount of time the background thread will hold onto log entries\n before sending them to the server.\n \"\"\"\n\n def __init__(self, cloud_logger, grace_period=_DEFAULT_GRACE_PERIOD,\n max_batch_size=_DEFAULT_MAX_BATCH_SIZE,\n max_latency=_DEFAULT_MAX_LATENCY):\n self._cloud_logger = cloud_logger\n self._grace_period = grace_period\n self._max_batch_size = max_batch_size\n self._max_latency = max_latency\n self._queue = queue.Queue(0)\n self._operational_lock = threading.Lock()\n self._thread = None\n\n @property\n def is_alive(self):\n \"\"\"Returns True is the background thread is running.\"\"\"\n return self._thread is not None and self._thread.is_alive()\n\n def _safely_commit_batch(self, batch):\n total_logs = len(batch.entries)\n\n try:\n if total_logs > 0:\n batch.commit()\n _LOGGER.debug('Submitted %d logs', total_logs)\n except Exception:\n _LOGGER.error(\n 'Failed to submit %d logs.', total_logs, exc_info=True)\n\n def _thread_main(self):\n \"\"\"The entry point for the worker thread.\n\n Pulls pending log entries off the queue and writes them in batches to\n the Cloud Logger.\n \"\"\"\n _LOGGER.debug('Background thread started.')\n\n quit_ = False\n while True:\n batch = self._cloud_logger.batch()\n items = _get_many(\n self._queue, max_items=self._max_batch_size,\n max_latency=self._max_latency)\n\n for item in items:\n if item is _WORKER_TERMINATOR:\n quit_ = True\n # Continue processing items, don't break, try to process\n # all items we got back before quitting.\n else:\n batch.log_struct(**item)\n\n self._safely_commit_batch(batch)\n\n for _ in range(len(items)):\n self._queue.task_done()\n\n if quit_:\n break\n\n _LOGGER.debug('Background thread exited gracefully.')\n\n def start(self):\n \"\"\"Starts the background thread.\n\n Additionally, this registers a handler for process exit to attempt\n to send any pending log entries before shutdown.\n \"\"\"\n with self._operational_lock:\n if self.is_alive:\n return\n\n self._thread = threading.Thread(\n target=self._thread_main,\n name=_WORKER_THREAD_NAME)\n self._thread.daemon = True\n self._thread.start()\n atexit.register(self._main_thread_terminated)\n\n def 
stop(self, grace_period=None):\n \"\"\"Signals the background thread to stop.\n\n This does not terminate the background thread. It simply queues the\n stop signal. If the main process exits before the background thread\n processes the stop signal, it will be terminated without finishing\n work. The ``grace_period`` parameter will give the background\n thread some time to finish processing before this function returns.\n\n :type grace_period: float\n :param grace_period: If specified, this method will block up to this\n many seconds to allow the background thread to finish work before\n returning.\n\n :rtype: bool\n :returns: True if the thread terminated. False if the thread is still\n running.\n \"\"\"\n if not self.is_alive:\n return True\n\n with self._operational_lock:\n self._queue.put_nowait(_WORKER_TERMINATOR)\n\n if grace_period is not None:\n print('Waiting up to %d seconds.' % (grace_period,))\n\n self._thread.join(timeout=grace_period)\n\n # Check this before disowning the thread, because after we disown\n # the thread is_alive will be False regardless of if the thread\n # exited or not.\n success = not self.is_alive\n\n self._thread = None\n\n return success\n\n def _main_thread_terminated(self):\n \"\"\"Callback that attempts to send pending logs before termination.\"\"\"\n if not self.is_alive:\n return\n\n if not self._queue.empty():\n print(\n 'Program shutting down, attempting to send %d queued log '\n 'entries to Stackdriver Logging...' % (self._queue.qsize(),))\n\n if self.stop(self._grace_period):\n print('Sent all pending logs.')\n else:\n print('Failed to send %d pending logs.' % (self._queue.qsize(),))\n\n def enqueue(self, record, message, resource=None, labels=None):\n \"\"\"Queues a log entry to be written by the background thread.\n\n :type record: :class:`logging.LogRecord`\n :param record: Python log record that the handler was called with.\n\n :type message: str\n :param message: The message from the ``LogRecord`` after being\n formatted by the associated log formatters.\n\n :type resource: :class:`~google.cloud.logging.resource.Resource`\n :param resource: (Optional) Monitored resource of the entry\n\n :type labels: dict\n :param labels: (Optional) Mapping of labels for the entry.\n \"\"\"\n self._queue.put_nowait({\n 'info': {\n 'message': message,\n 'python_logger': record.name,\n },\n 'severity': record.levelname,\n 'resource': resource,\n 'labels': labels,\n })\n\n def flush(self):\n \"\"\"Submit any pending log records.\"\"\"\n self._queue.join()\n\n\nclass BackgroundThreadTransport(Transport):\n \"\"\"Asynchronous transport that uses a background thread.\n\n :type client: :class:`~google.cloud.logging.client.Client`\n :param client: The Logging client.\n\n :type name: str\n :param name: the name of the logger.\n\n :type grace_period: float\n :param grace_period: The amount of time to wait for pending logs to\n be submitted when the process is shutting down.\n\n :type batch_size: int\n :param batch_size: The maximum number of items to send at a time in the\n background thread.\n\n :type max_latency: float\n :param max_latency: The amount of time to wait for new logs before\n sending a new batch. It is strongly recommended to keep this smaller\n than the grace_period. 
This means this is effectively the longest\n amount of time the background thread will hold onto log entries\n before sending them to the server.\n \"\"\"\n\n def __init__(self, client, name, grace_period=_DEFAULT_GRACE_PERIOD,\n batch_size=_DEFAULT_MAX_BATCH_SIZE,\n max_latency=_DEFAULT_MAX_LATENCY):\n self.client = client\n logger = self.client.logger(name)\n self.worker = _Worker(logger,\n grace_period=grace_period,\n max_batch_size=batch_size,\n max_latency=max_latency)\n self.worker.start()\n\n def send(self, record, message, resource=None, labels=None):\n \"\"\"Overrides Transport.send().\n\n :type record: :class:`logging.LogRecord`\n :param record: Python log record that the handler was called with.\n\n :type message: str\n :param message: The message from the ``LogRecord`` after being\n formatted by the associated log formatters.\n\n :type resource: :class:`~google.cloud.logging.resource.Resource`\n :param resource: (Optional) Monitored resource of the entry.\n\n :type labels: dict\n :param labels: (Optional) Mapping of labels for the entry.\n \"\"\"\n self.worker.enqueue(record, message, resource=resource, labels=labels)\n\n def flush(self):\n \"\"\"Submit any pending log records.\"\"\"\n self.worker.flush()\n", "path": "logging/google/cloud/logging/handlers/transports/background_thread.py"}], "after_files": [{"content": "# Copyright 2016 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Transport for Python logging handler\n\nUses a background worker to log to Stackdriver Logging asynchronously.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport atexit\nimport logging\nimport sys\nimport threading\nimport time\n\nfrom six.moves import range\nfrom six.moves import queue\n\nfrom google.cloud.logging.handlers.transports.base import Transport\n\n_DEFAULT_GRACE_PERIOD = 5.0 # Seconds\n_DEFAULT_MAX_BATCH_SIZE = 10\n_DEFAULT_MAX_LATENCY = 0 # Seconds\n_WORKER_THREAD_NAME = 'google.cloud.logging.Worker'\n_WORKER_TERMINATOR = object()\n_LOGGER = logging.getLogger(__name__)\n\n\ndef _get_many(queue_, max_items=None, max_latency=0):\n \"\"\"Get multiple items from a Queue.\n\n Gets at least one (blocking) and at most ``max_items`` items\n (non-blocking) from a given Queue. Does not mark the items as done.\n\n :type queue_: :class:`~queue.Queue`\n :param queue_: The Queue to get items from.\n\n :type max_items: int\n :param max_items: The maximum number of items to get. If ``None``, then all\n available items in the queue are returned.\n\n :type max_latency: float\n :param max_latency: The maximum number of seconds to wait for more than one\n item from a queue. 
This number includes the time required to retrieve\n the first item.\n\n :rtype: Sequence\n :returns: A sequence of items retrieved from the queue.\n \"\"\"\n start = time.time()\n # Always return at least one item.\n items = [queue_.get()]\n while max_items is None or len(items) < max_items:\n try:\n elapsed = time.time() - start\n timeout = max(0, max_latency - elapsed)\n items.append(queue_.get(timeout=timeout))\n except queue.Empty:\n break\n return items\n\n\nclass _Worker(object):\n \"\"\"A background thread that writes batches of log entries.\n\n :type cloud_logger: :class:`~google.cloud.logging.logger.Logger`\n :param cloud_logger: The logger to send entries to.\n\n :type grace_period: float\n :param grace_period: The amount of time to wait for pending logs to\n be submitted when the process is shutting down.\n\n :type max_batch_size: int\n :param max_batch_size: The maximum number of items to send at a time\n in the background thread.\n\n :type max_latency: float\n :param max_latency: The amount of time to wait for new logs before\n sending a new batch. It is strongly recommended to keep this smaller\n than the grace_period. This means this is effectively the longest\n amount of time the background thread will hold onto log entries\n before sending them to the server.\n \"\"\"\n\n def __init__(self, cloud_logger, grace_period=_DEFAULT_GRACE_PERIOD,\n max_batch_size=_DEFAULT_MAX_BATCH_SIZE,\n max_latency=_DEFAULT_MAX_LATENCY):\n self._cloud_logger = cloud_logger\n self._grace_period = grace_period\n self._max_batch_size = max_batch_size\n self._max_latency = max_latency\n self._queue = queue.Queue(0)\n self._operational_lock = threading.Lock()\n self._thread = None\n\n @property\n def is_alive(self):\n \"\"\"Returns True is the background thread is running.\"\"\"\n return self._thread is not None and self._thread.is_alive()\n\n def _safely_commit_batch(self, batch):\n total_logs = len(batch.entries)\n\n try:\n if total_logs > 0:\n batch.commit()\n _LOGGER.debug('Submitted %d logs', total_logs)\n except Exception:\n _LOGGER.error(\n 'Failed to submit %d logs.', total_logs, exc_info=True)\n\n def _thread_main(self):\n \"\"\"The entry point for the worker thread.\n\n Pulls pending log entries off the queue and writes them in batches to\n the Cloud Logger.\n \"\"\"\n _LOGGER.debug('Background thread started.')\n\n quit_ = False\n while True:\n batch = self._cloud_logger.batch()\n items = _get_many(\n self._queue, max_items=self._max_batch_size,\n max_latency=self._max_latency)\n\n for item in items:\n if item is _WORKER_TERMINATOR:\n quit_ = True\n # Continue processing items, don't break, try to process\n # all items we got back before quitting.\n else:\n batch.log_struct(**item)\n\n self._safely_commit_batch(batch)\n\n for _ in range(len(items)):\n self._queue.task_done()\n\n if quit_:\n break\n\n _LOGGER.debug('Background thread exited gracefully.')\n\n def start(self):\n \"\"\"Starts the background thread.\n\n Additionally, this registers a handler for process exit to attempt\n to send any pending log entries before shutdown.\n \"\"\"\n with self._operational_lock:\n if self.is_alive:\n return\n\n self._thread = threading.Thread(\n target=self._thread_main,\n name=_WORKER_THREAD_NAME)\n self._thread.daemon = True\n self._thread.start()\n atexit.register(self._main_thread_terminated)\n\n def stop(self, grace_period=None):\n \"\"\"Signals the background thread to stop.\n\n This does not terminate the background thread. It simply queues the\n stop signal. 
If the main process exits before the background thread\n processes the stop signal, it will be terminated without finishing\n work. The ``grace_period`` parameter will give the background\n thread some time to finish processing before this function returns.\n\n :type grace_period: float\n :param grace_period: If specified, this method will block up to this\n many seconds to allow the background thread to finish work before\n returning.\n\n :rtype: bool\n :returns: True if the thread terminated. False if the thread is still\n running.\n \"\"\"\n if not self.is_alive:\n return True\n\n with self._operational_lock:\n self._queue.put_nowait(_WORKER_TERMINATOR)\n\n if grace_period is not None:\n print(\n 'Waiting up to %d seconds.' % (grace_period,),\n file=sys.stderr)\n\n self._thread.join(timeout=grace_period)\n\n # Check this before disowning the thread, because after we disown\n # the thread is_alive will be False regardless of if the thread\n # exited or not.\n success = not self.is_alive\n\n self._thread = None\n\n return success\n\n def _main_thread_terminated(self):\n \"\"\"Callback that attempts to send pending logs before termination.\"\"\"\n if not self.is_alive:\n return\n\n if not self._queue.empty():\n print(\n 'Program shutting down, attempting to send %d queued log '\n 'entries to Stackdriver Logging...' % (self._queue.qsize(),),\n file=sys.stderr)\n\n if self.stop(self._grace_period):\n print('Sent all pending logs.', file=sys.stderr)\n else:\n print(\n 'Failed to send %d pending logs.' % (self._queue.qsize(),),\n file=sys.stderr)\n\n def enqueue(self, record, message, resource=None, labels=None):\n \"\"\"Queues a log entry to be written by the background thread.\n\n :type record: :class:`logging.LogRecord`\n :param record: Python log record that the handler was called with.\n\n :type message: str\n :param message: The message from the ``LogRecord`` after being\n formatted by the associated log formatters.\n\n :type resource: :class:`~google.cloud.logging.resource.Resource`\n :param resource: (Optional) Monitored resource of the entry\n\n :type labels: dict\n :param labels: (Optional) Mapping of labels for the entry.\n \"\"\"\n self._queue.put_nowait({\n 'info': {\n 'message': message,\n 'python_logger': record.name,\n },\n 'severity': record.levelname,\n 'resource': resource,\n 'labels': labels,\n })\n\n def flush(self):\n \"\"\"Submit any pending log records.\"\"\"\n self._queue.join()\n\n\nclass BackgroundThreadTransport(Transport):\n \"\"\"Asynchronous transport that uses a background thread.\n\n :type client: :class:`~google.cloud.logging.client.Client`\n :param client: The Logging client.\n\n :type name: str\n :param name: the name of the logger.\n\n :type grace_period: float\n :param grace_period: The amount of time to wait for pending logs to\n be submitted when the process is shutting down.\n\n :type batch_size: int\n :param batch_size: The maximum number of items to send at a time in the\n background thread.\n\n :type max_latency: float\n :param max_latency: The amount of time to wait for new logs before\n sending a new batch. It is strongly recommended to keep this smaller\n than the grace_period. 
This means this is effectively the longest\n amount of time the background thread will hold onto log entries\n before sending them to the server.\n \"\"\"\n\n def __init__(self, client, name, grace_period=_DEFAULT_GRACE_PERIOD,\n batch_size=_DEFAULT_MAX_BATCH_SIZE,\n max_latency=_DEFAULT_MAX_LATENCY):\n self.client = client\n logger = self.client.logger(name)\n self.worker = _Worker(logger,\n grace_period=grace_period,\n max_batch_size=batch_size,\n max_latency=max_latency)\n self.worker.start()\n\n def send(self, record, message, resource=None, labels=None):\n \"\"\"Overrides Transport.send().\n\n :type record: :class:`logging.LogRecord`\n :param record: Python log record that the handler was called with.\n\n :type message: str\n :param message: The message from the ``LogRecord`` after being\n formatted by the associated log formatters.\n\n :type resource: :class:`~google.cloud.logging.resource.Resource`\n :param resource: (Optional) Monitored resource of the entry.\n\n :type labels: dict\n :param labels: (Optional) Mapping of labels for the entry.\n \"\"\"\n self.worker.enqueue(record, message, resource=resource, labels=labels)\n\n def flush(self):\n \"\"\"Submit any pending log records.\"\"\"\n self.worker.flush()\n", "path": "logging/google/cloud/logging/handlers/transports/background_thread.py"}]}
| 3,526 | 383 |
gh_patches_debug_25655
|
rasdani/github-patches
|
git_diff
|
bookwyrm-social__bookwyrm-2949
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
not able to search for a user un-logged in
No option to search for a user without being logged in, even though the logged-out search field says "books or users".
**Screenshots**


**Instance**
bookwyrm.social
---
**Desktop (please complete the following information):**
iOS 12, Firefox
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `bookwyrm/views/search.py`
Content:
```
1 """ search views"""
2 import re
3
4 from django.contrib.postgres.search import TrigramSimilarity
5 from django.core.paginator import Paginator
6 from django.db.models.functions import Greatest
7 from django.http import JsonResponse
8 from django.template.response import TemplateResponse
9 from django.views import View
10
11 from csp.decorators import csp_update
12
13 from bookwyrm import models
14 from bookwyrm.connectors import connector_manager
15 from bookwyrm.book_search import search, format_search_result
16 from bookwyrm.settings import PAGE_LENGTH
17 from bookwyrm.utils import regex
18 from .helpers import is_api_request
19 from .helpers import handle_remote_webfinger
20
21
22 # pylint: disable= no-self-use
23 class Search(View):
24 """search users or books"""
25
26 @csp_update(IMG_SRC="*")
27 def get(self, request):
28 """that search bar up top"""
29 if is_api_request(request):
30 return api_book_search(request)
31
32 query = request.GET.get("q")
33 if not query:
34 return TemplateResponse(request, "search/book.html")
35
36 search_type = request.GET.get("type")
37 if query and not search_type:
38 search_type = "user" if "@" in query else "book"
39
40 endpoints = {
41 "book": book_search,
42 "user": user_search,
43 "list": list_search,
44 }
45 if not search_type in endpoints:
46 search_type = "book"
47
48 return endpoints[search_type](request)
49
50
51 def api_book_search(request):
52 """Return books via API response"""
53 query = request.GET.get("q")
54 query = isbn_check(query)
55 min_confidence = request.GET.get("min_confidence", 0)
56 # only return local book results via json so we don't cascade
57 book_results = search(query, min_confidence=min_confidence)
58 return JsonResponse(
59 [format_search_result(r) for r in book_results[:10]], safe=False
60 )
61
62
63 def book_search(request):
64 """the real business is elsewhere"""
65 query = request.GET.get("q")
66 # check if query is isbn
67 query = isbn_check(query)
68 min_confidence = request.GET.get("min_confidence", 0)
69 search_remote = request.GET.get("remote", False) and request.user.is_authenticated
70
71 # try a local-only search
72 local_results = search(query, min_confidence=min_confidence)
73 paginated = Paginator(local_results, PAGE_LENGTH)
74 page = paginated.get_page(request.GET.get("page"))
75 data = {
76 "query": query,
77 "results": page,
78 "type": "book",
79 "remote": search_remote,
80 "page_range": paginated.get_elided_page_range(
81 page.number, on_each_side=2, on_ends=1
82 ),
83 }
84 # if a logged in user requested remote results or got no local results, try remote
85 if request.user.is_authenticated and (not local_results or search_remote):
86 data["remote_results"] = connector_manager.search(
87 query, min_confidence=min_confidence
88 )
89 data["remote"] = True
90 return TemplateResponse(request, "search/book.html", data)
91
92
93 def user_search(request):
94 """cool kids members only user search"""
95 viewer = request.user
96 query = request.GET.get("q")
97 query = query.strip()
98 data = {"type": "user", "query": query}
99 # logged out viewers can't search users
100 if not viewer.is_authenticated:
101 return TemplateResponse(request, "search/user.html", data)
102
103 # use webfinger for mastodon style [email protected] username to load the user if
104 # they don't exist locally (handle_remote_webfinger will check the db)
105 if re.match(regex.FULL_USERNAME, query):
106 handle_remote_webfinger(query)
107
108 results = (
109 models.User.viewer_aware_objects(viewer)
110 .annotate(
111 similarity=Greatest(
112 TrigramSimilarity("username", query),
113 TrigramSimilarity("localname", query),
114 )
115 )
116 .filter(
117 similarity__gt=0.5,
118 )
119 .order_by("-similarity")
120 )
121 paginated = Paginator(results, PAGE_LENGTH)
122 page = paginated.get_page(request.GET.get("page"))
123 data["results"] = page
124 data["page_range"] = paginated.get_elided_page_range(
125 page.number, on_each_side=2, on_ends=1
126 )
127 return TemplateResponse(request, "search/user.html", data)
128
129
130 def list_search(request):
131 """any relevent lists?"""
132 query = request.GET.get("q")
133 data = {"query": query, "type": "list"}
134 results = (
135 models.List.privacy_filter(
136 request.user,
137 privacy_levels=["public", "followers"],
138 )
139 .annotate(
140 similarity=Greatest(
141 TrigramSimilarity("name", query),
142 TrigramSimilarity("description", query),
143 )
144 )
145 .filter(
146 similarity__gt=0.1,
147 )
148 .order_by("-similarity")
149 )
150 paginated = Paginator(results, PAGE_LENGTH)
151 page = paginated.get_page(request.GET.get("page"))
152 data["results"] = page
153 data["page_range"] = paginated.get_elided_page_range(
154 page.number, on_each_side=2, on_ends=1
155 )
156 return TemplateResponse(request, "search/list.html", data)
157
158
159 def isbn_check(query):
160 """isbn10 or isbn13 check, if so remove separators"""
161 if query:
162 su_num = re.sub(r"(?<=\d)\D(?=\d|[xX])", "", query)
163 if len(su_num) == 13 and su_num.isdecimal():
164 # Multiply every other digit by 3
165 # Add these numbers and the other digits
166 product = sum(int(ch) for ch in su_num[::2]) + sum(
167 int(ch) * 3 for ch in su_num[1::2]
168 )
169 if product % 10 == 0:
170 return su_num
171 elif (
172 len(su_num) == 10
173 and su_num[:-1].isdecimal()
174 and (su_num[-1].isdecimal() or su_num[-1].lower() == "x")
175 ):
176 product = 0
177 # Iterate through code_string
178 for i in range(9):
179 # for each character, multiply by a different decreasing number: 10 - x
180 product = product + int(su_num[i]) * (10 - i)
181 # Handle last character
182 if su_num[9].lower() == "x":
183 product += 10
184 else:
185 product += int(su_num[9])
186 if product % 11 == 0:
187 return su_num
188 return query
189
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/bookwyrm/views/search.py b/bookwyrm/views/search.py
--- a/bookwyrm/views/search.py
+++ b/bookwyrm/views/search.py
@@ -91,18 +91,15 @@
def user_search(request):
- """cool kids members only user search"""
+ """user search: search for a user"""
viewer = request.user
query = request.GET.get("q")
query = query.strip()
data = {"type": "user", "query": query}
- # logged out viewers can't search users
- if not viewer.is_authenticated:
- return TemplateResponse(request, "search/user.html", data)
# use webfinger for mastodon style [email protected] username to load the user if
# they don't exist locally (handle_remote_webfinger will check the db)
- if re.match(regex.FULL_USERNAME, query):
+ if re.match(regex.FULL_USERNAME, query) and viewer.is_authenticated:
handle_remote_webfinger(query)
results = (
@@ -118,6 +115,11 @@
)
.order_by("-similarity")
)
+
+ # don't expose remote users
+ if not viewer.is_authenticated:
+ results = results.filter(local=True)
+
paginated = Paginator(results, PAGE_LENGTH)
page = paginated.get_page(request.GET.get("page"))
data["results"] = page
|
{"golden_diff": "diff --git a/bookwyrm/views/search.py b/bookwyrm/views/search.py\n--- a/bookwyrm/views/search.py\n+++ b/bookwyrm/views/search.py\n@@ -91,18 +91,15 @@\n \n \n def user_search(request):\n- \"\"\"cool kids members only user search\"\"\"\n+ \"\"\"user search: search for a user\"\"\"\n viewer = request.user\n query = request.GET.get(\"q\")\n query = query.strip()\n data = {\"type\": \"user\", \"query\": query}\n- # logged out viewers can't search users\n- if not viewer.is_authenticated:\n- return TemplateResponse(request, \"search/user.html\", data)\n \n # use webfinger for mastodon style [email protected] username to load the user if\n # they don't exist locally (handle_remote_webfinger will check the db)\n- if re.match(regex.FULL_USERNAME, query):\n+ if re.match(regex.FULL_USERNAME, query) and viewer.is_authenticated:\n handle_remote_webfinger(query)\n \n results = (\n@@ -118,6 +115,11 @@\n )\n .order_by(\"-similarity\")\n )\n+\n+ # don't expose remote users\n+ if not viewer.is_authenticated:\n+ results = results.filter(local=True)\n+\n paginated = Paginator(results, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data[\"results\"] = page\n", "issue": "not able to search for a user un-logged in\nNo option to search for a user without being logged in even though it says books or users in the non logged in search field \r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n**Instance**\r\nbookwyrm.social\r\n\r\n---\r\n\r\n**Desktop (please complete the following information):**\r\niOS 12, Firefox\r\n\n", "before_files": [{"content": "\"\"\" search views\"\"\"\nimport re\n\nfrom django.contrib.postgres.search import TrigramSimilarity\nfrom django.core.paginator import Paginator\nfrom django.db.models.functions import Greatest\nfrom django.http import JsonResponse\nfrom django.template.response import TemplateResponse\nfrom django.views import View\n\nfrom csp.decorators import csp_update\n\nfrom bookwyrm import models\nfrom bookwyrm.connectors import connector_manager\nfrom bookwyrm.book_search import search, format_search_result\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom bookwyrm.utils import regex\nfrom .helpers import is_api_request\nfrom .helpers import handle_remote_webfinger\n\n\n# pylint: disable= no-self-use\nclass Search(View):\n \"\"\"search users or books\"\"\"\n\n @csp_update(IMG_SRC=\"*\")\n def get(self, request):\n \"\"\"that search bar up top\"\"\"\n if is_api_request(request):\n return api_book_search(request)\n\n query = request.GET.get(\"q\")\n if not query:\n return TemplateResponse(request, \"search/book.html\")\n\n search_type = request.GET.get(\"type\")\n if query and not search_type:\n search_type = \"user\" if \"@\" in query else \"book\"\n\n endpoints = {\n \"book\": book_search,\n \"user\": user_search,\n \"list\": list_search,\n }\n if not search_type in endpoints:\n search_type = \"book\"\n\n return endpoints[search_type](request)\n\n\ndef api_book_search(request):\n \"\"\"Return books via API response\"\"\"\n query = request.GET.get(\"q\")\n query = isbn_check(query)\n min_confidence = request.GET.get(\"min_confidence\", 0)\n # only return local book results via json so we don't cascade\n book_results = search(query, min_confidence=min_confidence)\n return JsonResponse(\n [format_search_result(r) for r in book_results[:10]], safe=False\n )\n\n\ndef book_search(request):\n \"\"\"the real business is elsewhere\"\"\"\n query = request.GET.get(\"q\")\n # check if query is isbn\n query = isbn_check(query)\n min_confidence = 
request.GET.get(\"min_confidence\", 0)\n search_remote = request.GET.get(\"remote\", False) and request.user.is_authenticated\n\n # try a local-only search\n local_results = search(query, min_confidence=min_confidence)\n paginated = Paginator(local_results, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"query\": query,\n \"results\": page,\n \"type\": \"book\",\n \"remote\": search_remote,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n # if a logged in user requested remote results or got no local results, try remote\n if request.user.is_authenticated and (not local_results or search_remote):\n data[\"remote_results\"] = connector_manager.search(\n query, min_confidence=min_confidence\n )\n data[\"remote\"] = True\n return TemplateResponse(request, \"search/book.html\", data)\n\n\ndef user_search(request):\n \"\"\"cool kids members only user search\"\"\"\n viewer = request.user\n query = request.GET.get(\"q\")\n query = query.strip()\n data = {\"type\": \"user\", \"query\": query}\n # logged out viewers can't search users\n if not viewer.is_authenticated:\n return TemplateResponse(request, \"search/user.html\", data)\n\n # use webfinger for mastodon style [email protected] username to load the user if\n # they don't exist locally (handle_remote_webfinger will check the db)\n if re.match(regex.FULL_USERNAME, query):\n handle_remote_webfinger(query)\n\n results = (\n models.User.viewer_aware_objects(viewer)\n .annotate(\n similarity=Greatest(\n TrigramSimilarity(\"username\", query),\n TrigramSimilarity(\"localname\", query),\n )\n )\n .filter(\n similarity__gt=0.5,\n )\n .order_by(\"-similarity\")\n )\n paginated = Paginator(results, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data[\"results\"] = page\n data[\"page_range\"] = paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n )\n return TemplateResponse(request, \"search/user.html\", data)\n\n\ndef list_search(request):\n \"\"\"any relevent lists?\"\"\"\n query = request.GET.get(\"q\")\n data = {\"query\": query, \"type\": \"list\"}\n results = (\n models.List.privacy_filter(\n request.user,\n privacy_levels=[\"public\", \"followers\"],\n )\n .annotate(\n similarity=Greatest(\n TrigramSimilarity(\"name\", query),\n TrigramSimilarity(\"description\", query),\n )\n )\n .filter(\n similarity__gt=0.1,\n )\n .order_by(\"-similarity\")\n )\n paginated = Paginator(results, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data[\"results\"] = page\n data[\"page_range\"] = paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n )\n return TemplateResponse(request, \"search/list.html\", data)\n\n\ndef isbn_check(query):\n \"\"\"isbn10 or isbn13 check, if so remove separators\"\"\"\n if query:\n su_num = re.sub(r\"(?<=\\d)\\D(?=\\d|[xX])\", \"\", query)\n if len(su_num) == 13 and su_num.isdecimal():\n # Multiply every other digit by 3\n # Add these numbers and the other digits\n product = sum(int(ch) for ch in su_num[::2]) + sum(\n int(ch) * 3 for ch in su_num[1::2]\n )\n if product % 10 == 0:\n return su_num\n elif (\n len(su_num) == 10\n and su_num[:-1].isdecimal()\n and (su_num[-1].isdecimal() or su_num[-1].lower() == \"x\")\n ):\n product = 0\n # Iterate through code_string\n for i in range(9):\n # for each character, multiply by a different decreasing number: 10 - x\n product = product + int(su_num[i]) * (10 - i)\n # Handle last character\n if su_num[9].lower() == 
\"x\":\n product += 10\n else:\n product += int(su_num[9])\n if product % 11 == 0:\n return su_num\n return query\n", "path": "bookwyrm/views/search.py"}], "after_files": [{"content": "\"\"\" search views\"\"\"\nimport re\n\nfrom django.contrib.postgres.search import TrigramSimilarity\nfrom django.core.paginator import Paginator\nfrom django.db.models.functions import Greatest\nfrom django.http import JsonResponse\nfrom django.template.response import TemplateResponse\nfrom django.views import View\n\nfrom csp.decorators import csp_update\n\nfrom bookwyrm import models\nfrom bookwyrm.connectors import connector_manager\nfrom bookwyrm.book_search import search, format_search_result\nfrom bookwyrm.settings import PAGE_LENGTH\nfrom bookwyrm.utils import regex\nfrom .helpers import is_api_request\nfrom .helpers import handle_remote_webfinger\n\n\n# pylint: disable= no-self-use\nclass Search(View):\n \"\"\"search users or books\"\"\"\n\n @csp_update(IMG_SRC=\"*\")\n def get(self, request):\n \"\"\"that search bar up top\"\"\"\n if is_api_request(request):\n return api_book_search(request)\n\n query = request.GET.get(\"q\")\n if not query:\n return TemplateResponse(request, \"search/book.html\")\n\n search_type = request.GET.get(\"type\")\n if query and not search_type:\n search_type = \"user\" if \"@\" in query else \"book\"\n\n endpoints = {\n \"book\": book_search,\n \"user\": user_search,\n \"list\": list_search,\n }\n if not search_type in endpoints:\n search_type = \"book\"\n\n return endpoints[search_type](request)\n\n\ndef api_book_search(request):\n \"\"\"Return books via API response\"\"\"\n query = request.GET.get(\"q\")\n query = isbn_check(query)\n min_confidence = request.GET.get(\"min_confidence\", 0)\n # only return local book results via json so we don't cascade\n book_results = search(query, min_confidence=min_confidence)\n return JsonResponse(\n [format_search_result(r) for r in book_results[:10]], safe=False\n )\n\n\ndef book_search(request):\n \"\"\"the real business is elsewhere\"\"\"\n query = request.GET.get(\"q\")\n # check if query is isbn\n query = isbn_check(query)\n min_confidence = request.GET.get(\"min_confidence\", 0)\n search_remote = request.GET.get(\"remote\", False) and request.user.is_authenticated\n\n # try a local-only search\n local_results = search(query, min_confidence=min_confidence)\n paginated = Paginator(local_results, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data = {\n \"query\": query,\n \"results\": page,\n \"type\": \"book\",\n \"remote\": search_remote,\n \"page_range\": paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n ),\n }\n # if a logged in user requested remote results or got no local results, try remote\n if request.user.is_authenticated and (not local_results or search_remote):\n data[\"remote_results\"] = connector_manager.search(\n query, min_confidence=min_confidence\n )\n data[\"remote\"] = True\n return TemplateResponse(request, \"search/book.html\", data)\n\n\ndef user_search(request):\n \"\"\"user search: search for a user\"\"\"\n viewer = request.user\n query = request.GET.get(\"q\")\n query = query.strip()\n data = {\"type\": \"user\", \"query\": query}\n\n # use webfinger for mastodon style [email protected] username to load the user if\n # they don't exist locally (handle_remote_webfinger will check the db)\n if re.match(regex.FULL_USERNAME, query) and viewer.is_authenticated:\n handle_remote_webfinger(query)\n\n results = (\n models.User.viewer_aware_objects(viewer)\n 
.annotate(\n similarity=Greatest(\n TrigramSimilarity(\"username\", query),\n TrigramSimilarity(\"localname\", query),\n )\n )\n .filter(\n similarity__gt=0.5,\n )\n .order_by(\"-similarity\")\n )\n\n # don't expose remote users\n if not viewer.is_authenticated:\n results = results.filter(local=True)\n\n paginated = Paginator(results, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data[\"results\"] = page\n data[\"page_range\"] = paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n )\n return TemplateResponse(request, \"search/user.html\", data)\n\n\ndef list_search(request):\n \"\"\"any relevent lists?\"\"\"\n query = request.GET.get(\"q\")\n data = {\"query\": query, \"type\": \"list\"}\n results = (\n models.List.privacy_filter(\n request.user,\n privacy_levels=[\"public\", \"followers\"],\n )\n .annotate(\n similarity=Greatest(\n TrigramSimilarity(\"name\", query),\n TrigramSimilarity(\"description\", query),\n )\n )\n .filter(\n similarity__gt=0.1,\n )\n .order_by(\"-similarity\")\n )\n paginated = Paginator(results, PAGE_LENGTH)\n page = paginated.get_page(request.GET.get(\"page\"))\n data[\"results\"] = page\n data[\"page_range\"] = paginated.get_elided_page_range(\n page.number, on_each_side=2, on_ends=1\n )\n return TemplateResponse(request, \"search/list.html\", data)\n\n\ndef isbn_check(query):\n \"\"\"isbn10 or isbn13 check, if so remove separators\"\"\"\n if query:\n su_num = re.sub(r\"(?<=\\d)\\D(?=\\d|[xX])\", \"\", query)\n if len(su_num) == 13 and su_num.isdecimal():\n # Multiply every other digit by 3\n # Add these numbers and the other digits\n product = sum(int(ch) for ch in su_num[::2]) + sum(\n int(ch) * 3 for ch in su_num[1::2]\n )\n if product % 10 == 0:\n return su_num\n elif (\n len(su_num) == 10\n and su_num[:-1].isdecimal()\n and (su_num[-1].isdecimal() or su_num[-1].lower() == \"x\")\n ):\n product = 0\n # Iterate through code_string\n for i in range(9):\n # for each character, multiply by a different decreasing number: 10 - x\n product = product + int(su_num[i]) * (10 - i)\n # Handle last character\n if su_num[9].lower() == \"x\":\n product += 10\n else:\n product += int(su_num[9])\n if product % 11 == 0:\n return su_num\n return query\n", "path": "bookwyrm/views/search.py"}]}
| 2,437 | 310 |
gh_patches_debug_29372
|
rasdani/github-patches
|
git_diff
|
conda__conda-6752
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
conda is broken if your home directory is read-only
Conda currently requires the user's home directory to be writable.
If the directory conda is installed into is writable (say, a tmpfs) then you can get a long way by using
```shell
./Miniconda3-latest-Linux-x86_64.sh -p $CONDA_DIR -b -f
conda config --system --set always_yes yes
conda config --system --set changeps1 no
conda config --system --add envs_dirs $CONDA_DIR/envs
conda config --system --add pkgs_dirs $CONDA_DIR/pkgs
```
However, this is foiled by the following line -> https://github.com/conda/conda/blob/7616b87ad87b80da16b8263011c9c708be98147c/conda/core/envs_manager.py#L18
```python
USER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))
```
I'm not sure if this would even work on Windows?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `conda/core/envs_manager.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 from __future__ import absolute_import, division, print_function, unicode_literals
3
4 from logging import getLogger
5 from os import listdir
6 from os.path import dirname, isdir, isfile, join, normpath, split as path_split
7
8 from ..base.constants import ROOT_ENV_NAME
9 from ..base.context import context
10 from ..common.compat import ensure_text_type, on_win, open
11 from ..common.path import expand, paths_equal
12 from ..gateways.disk.read import yield_lines
13 from ..gateways.disk.test import is_conda_environment
14
15 log = getLogger(__name__)
16
17
18 USER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))
19
20
21 def register_env(location):
22 location = normpath(location)
23
24 if "placehold_pl" in location:
25 # Don't record envs created by conda-build.
26 return
27
28 if location in yield_lines(USER_ENVIRONMENTS_TXT_FILE):
29 # Nothing to do. Location is already recorded in a known environments.txt file.
30 return
31
32 with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:
33 fh.write(ensure_text_type(location))
34 fh.write('\n')
35
36
37 def unregister_env(location):
38 if isdir(location):
39 meta_dir = join(location, 'conda-meta')
40 if isdir(meta_dir):
41 meta_dir_contents = listdir(meta_dir)
42 if len(meta_dir_contents) > 1:
43 # if there are any files left other than 'conda-meta/history'
44 # then don't unregister
45 return
46
47 _clean_environments_txt(USER_ENVIRONMENTS_TXT_FILE, location)
48
49
50 def list_all_known_prefixes():
51 all_env_paths = set()
52 if on_win:
53 home_dir_dir = dirname(expand('~'))
54 for home_dir in listdir(home_dir_dir):
55 environments_txt_file = join(home_dir_dir, home_dir, '.conda', 'environments.txt')
56 if isfile(environments_txt_file):
57 all_env_paths.update(_clean_environments_txt(environments_txt_file))
58 else:
59 from os import geteuid
60 from pwd import getpwall
61 if geteuid() == 0:
62 search_dirs = tuple(pwentry.pw_dir for pwentry in getpwall()) or (expand('~'),)
63 else:
64 search_dirs = (expand('~'),)
65 for home_dir in search_dirs:
66 environments_txt_file = join(home_dir, '.conda', 'environments.txt')
67 if isfile(environments_txt_file):
68 all_env_paths.update(_clean_environments_txt(environments_txt_file))
69
70 # in case environments.txt files aren't complete, also add all known conda environments in
71 # all envs_dirs
72 envs_dirs = (envs_dir for envs_dir in context.envs_dirs if isdir(envs_dir))
73 all_env_paths.update(path for path in (
74 join(envs_dir, name) for envs_dir in envs_dirs for name in listdir(envs_dir)
75 ) if path not in all_env_paths and is_conda_environment(path))
76
77 all_env_paths.add(context.root_prefix)
78 return sorted(all_env_paths)
79
80
81 def env_name(prefix):
82 if not prefix:
83 return None
84 if paths_equal(prefix, context.root_prefix):
85 return ROOT_ENV_NAME
86 maybe_envs_dir, maybe_name = path_split(prefix)
87 for envs_dir in context.envs_dirs:
88 if paths_equal(envs_dir, maybe_envs_dir):
89 return maybe_name
90 return prefix
91
92
93 def _clean_environments_txt(environments_txt_file, remove_location=None):
94 if not isfile(environments_txt_file):
95 return ()
96
97 if remove_location:
98 remove_location = normpath(remove_location)
99 environments_txt_lines = tuple(yield_lines(environments_txt_file))
100 environments_txt_lines_cleaned = tuple(
101 prefix for prefix in environments_txt_lines
102 if prefix != remove_location and is_conda_environment(prefix)
103 )
104 if environments_txt_lines_cleaned != environments_txt_lines:
105 _rewrite_environments_txt(environments_txt_file, environments_txt_lines_cleaned)
106 return environments_txt_lines_cleaned
107
108
109 def _rewrite_environments_txt(environments_txt_file, prefixes):
110 try:
111 with open(environments_txt_file, 'w') as fh:
112 fh.write('\n'.join(prefixes))
113 fh.write('\n')
114 except (IOError, OSError) as e:
115 log.info("File not cleaned: %s", environments_txt_file)
116 log.debug('%r', e, exc_info=True)
117
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/conda/core/envs_manager.py b/conda/core/envs_manager.py
--- a/conda/core/envs_manager.py
+++ b/conda/core/envs_manager.py
@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals
+from errno import EACCES
from logging import getLogger
from os import listdir
from os.path import dirname, isdir, isfile, join, normpath, split as path_split
@@ -29,9 +30,17 @@
# Nothing to do. Location is already recorded in a known environments.txt file.
return
- with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:
- fh.write(ensure_text_type(location))
- fh.write('\n')
+ try:
+ with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:
+ fh.write(ensure_text_type(location))
+ fh.write('\n')
+ except EnvironmentError as e:
+ if e.errno == EACCES:
+ log.warn("Unable to register environment. Path not writable.\n"
+ " environment location: %s\n"
+ " registry file: %s", location, USER_ENVIRONMENTS_TXT_FILE)
+ else:
+ raise
def unregister_env(location):
@@ -111,6 +120,6 @@
with open(environments_txt_file, 'w') as fh:
fh.write('\n'.join(prefixes))
fh.write('\n')
- except (IOError, OSError) as e:
+ except EnvironmentError as e:
log.info("File not cleaned: %s", environments_txt_file)
log.debug('%r', e, exc_info=True)
|
{"golden_diff": "diff --git a/conda/core/envs_manager.py b/conda/core/envs_manager.py\n--- a/conda/core/envs_manager.py\n+++ b/conda/core/envs_manager.py\n@@ -1,6 +1,7 @@\n # -*- coding: utf-8 -*-\n from __future__ import absolute_import, division, print_function, unicode_literals\n \n+from errno import EACCES\n from logging import getLogger\n from os import listdir\n from os.path import dirname, isdir, isfile, join, normpath, split as path_split\n@@ -29,9 +30,17 @@\n # Nothing to do. Location is already recorded in a known environments.txt file.\n return\n \n- with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:\n- fh.write(ensure_text_type(location))\n- fh.write('\\n')\n+ try:\n+ with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:\n+ fh.write(ensure_text_type(location))\n+ fh.write('\\n')\n+ except EnvironmentError as e:\n+ if e.errno == EACCES:\n+ log.warn(\"Unable to register environment. Path not writable.\\n\"\n+ \" environment location: %s\\n\"\n+ \" registry file: %s\", location, USER_ENVIRONMENTS_TXT_FILE)\n+ else:\n+ raise\n \n \n def unregister_env(location):\n@@ -111,6 +120,6 @@\n with open(environments_txt_file, 'w') as fh:\n fh.write('\\n'.join(prefixes))\n fh.write('\\n')\n- except (IOError, OSError) as e:\n+ except EnvironmentError as e:\n log.info(\"File not cleaned: %s\", environments_txt_file)\n log.debug('%r', e, exc_info=True)\n", "issue": "conda is broken if your home directory is read-only\nConda currently requires the user's home directory to be writable.\r\n\r\nIf the directory conda is installed into is writable (say a tmpfs) then you can get along way by using \r\n```shell\r\n\t\t./Miniconda3-latest-Linux-x86_64.sh -p $CONDA_DIR -b -f\r\n\t\tconda config --system --set always_yes yes\r\n\t\tconda config --system --set changeps1 no\r\n\t\tconda config --system --add envs_dirs $CONDA_DIR/envs\r\n\t\tconda config --system --add pkgs_dirs $CONDA_DIR/pkgs\r\n```\r\n\r\nHowever, this is foiled by the following line -> https://github.com/conda/conda/blob/7616b87ad87b80da16b8263011c9c708be98147c/conda/core/envs_manager.py#L18\r\n\r\n```python\r\nUSER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))\r\n```\r\n\r\nI'm not sure if this would even work on Windows?\r\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom logging import getLogger\nfrom os import listdir\nfrom os.path import dirname, isdir, isfile, join, normpath, split as path_split\n\nfrom ..base.constants import ROOT_ENV_NAME\nfrom ..base.context import context\nfrom ..common.compat import ensure_text_type, on_win, open\nfrom ..common.path import expand, paths_equal\nfrom ..gateways.disk.read import yield_lines\nfrom ..gateways.disk.test import is_conda_environment\n\nlog = getLogger(__name__)\n\n\nUSER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))\n\n\ndef register_env(location):\n location = normpath(location)\n\n if \"placehold_pl\" in location:\n # Don't record envs created by conda-build.\n return\n\n if location in yield_lines(USER_ENVIRONMENTS_TXT_FILE):\n # Nothing to do. 
Location is already recorded in a known environments.txt file.\n return\n\n with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:\n fh.write(ensure_text_type(location))\n fh.write('\\n')\n\n\ndef unregister_env(location):\n if isdir(location):\n meta_dir = join(location, 'conda-meta')\n if isdir(meta_dir):\n meta_dir_contents = listdir(meta_dir)\n if len(meta_dir_contents) > 1:\n # if there are any files left other than 'conda-meta/history'\n # then don't unregister\n return\n\n _clean_environments_txt(USER_ENVIRONMENTS_TXT_FILE, location)\n\n\ndef list_all_known_prefixes():\n all_env_paths = set()\n if on_win:\n home_dir_dir = dirname(expand('~'))\n for home_dir in listdir(home_dir_dir):\n environments_txt_file = join(home_dir_dir, home_dir, '.conda', 'environments.txt')\n if isfile(environments_txt_file):\n all_env_paths.update(_clean_environments_txt(environments_txt_file))\n else:\n from os import geteuid\n from pwd import getpwall\n if geteuid() == 0:\n search_dirs = tuple(pwentry.pw_dir for pwentry in getpwall()) or (expand('~'),)\n else:\n search_dirs = (expand('~'),)\n for home_dir in search_dirs:\n environments_txt_file = join(home_dir, '.conda', 'environments.txt')\n if isfile(environments_txt_file):\n all_env_paths.update(_clean_environments_txt(environments_txt_file))\n\n # in case environments.txt files aren't complete, also add all known conda environments in\n # all envs_dirs\n envs_dirs = (envs_dir for envs_dir in context.envs_dirs if isdir(envs_dir))\n all_env_paths.update(path for path in (\n join(envs_dir, name) for envs_dir in envs_dirs for name in listdir(envs_dir)\n ) if path not in all_env_paths and is_conda_environment(path))\n\n all_env_paths.add(context.root_prefix)\n return sorted(all_env_paths)\n\n\ndef env_name(prefix):\n if not prefix:\n return None\n if paths_equal(prefix, context.root_prefix):\n return ROOT_ENV_NAME\n maybe_envs_dir, maybe_name = path_split(prefix)\n for envs_dir in context.envs_dirs:\n if paths_equal(envs_dir, maybe_envs_dir):\n return maybe_name\n return prefix\n\n\ndef _clean_environments_txt(environments_txt_file, remove_location=None):\n if not isfile(environments_txt_file):\n return ()\n\n if remove_location:\n remove_location = normpath(remove_location)\n environments_txt_lines = tuple(yield_lines(environments_txt_file))\n environments_txt_lines_cleaned = tuple(\n prefix for prefix in environments_txt_lines\n if prefix != remove_location and is_conda_environment(prefix)\n )\n if environments_txt_lines_cleaned != environments_txt_lines:\n _rewrite_environments_txt(environments_txt_file, environments_txt_lines_cleaned)\n return environments_txt_lines_cleaned\n\n\ndef _rewrite_environments_txt(environments_txt_file, prefixes):\n try:\n with open(environments_txt_file, 'w') as fh:\n fh.write('\\n'.join(prefixes))\n fh.write('\\n')\n except (IOError, OSError) as e:\n log.info(\"File not cleaned: %s\", environments_txt_file)\n log.debug('%r', e, exc_info=True)\n", "path": "conda/core/envs_manager.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom errno import EACCES\nfrom logging import getLogger\nfrom os import listdir\nfrom os.path import dirname, isdir, isfile, join, normpath, split as path_split\n\nfrom ..base.constants import ROOT_ENV_NAME\nfrom ..base.context import context\nfrom ..common.compat import ensure_text_type, on_win, open\nfrom ..common.path import expand, paths_equal\nfrom ..gateways.disk.read import yield_lines\nfrom 
..gateways.disk.test import is_conda_environment\n\nlog = getLogger(__name__)\n\n\nUSER_ENVIRONMENTS_TXT_FILE = expand(join('~', '.conda', 'environments.txt'))\n\n\ndef register_env(location):\n location = normpath(location)\n\n if \"placehold_pl\" in location:\n # Don't record envs created by conda-build.\n return\n\n if location in yield_lines(USER_ENVIRONMENTS_TXT_FILE):\n # Nothing to do. Location is already recorded in a known environments.txt file.\n return\n\n try:\n with open(USER_ENVIRONMENTS_TXT_FILE, 'a') as fh:\n fh.write(ensure_text_type(location))\n fh.write('\\n')\n except EnvironmentError as e:\n if e.errno == EACCES:\n log.warn(\"Unable to register environment. Path not writable.\\n\"\n \" environment location: %s\\n\"\n \" registry file: %s\", location, USER_ENVIRONMENTS_TXT_FILE)\n else:\n raise\n\n\ndef unregister_env(location):\n if isdir(location):\n meta_dir = join(location, 'conda-meta')\n if isdir(meta_dir):\n meta_dir_contents = listdir(meta_dir)\n if len(meta_dir_contents) > 1:\n # if there are any files left other than 'conda-meta/history'\n # then don't unregister\n return\n\n _clean_environments_txt(USER_ENVIRONMENTS_TXT_FILE, location)\n\n\ndef list_all_known_prefixes():\n all_env_paths = set()\n if on_win:\n home_dir_dir = dirname(expand('~'))\n for home_dir in listdir(home_dir_dir):\n environments_txt_file = join(home_dir_dir, home_dir, '.conda', 'environments.txt')\n if isfile(environments_txt_file):\n all_env_paths.update(_clean_environments_txt(environments_txt_file))\n else:\n from os import geteuid\n from pwd import getpwall\n if geteuid() == 0:\n search_dirs = tuple(pwentry.pw_dir for pwentry in getpwall()) or (expand('~'),)\n else:\n search_dirs = (expand('~'),)\n for home_dir in search_dirs:\n environments_txt_file = join(home_dir, '.conda', 'environments.txt')\n if isfile(environments_txt_file):\n all_env_paths.update(_clean_environments_txt(environments_txt_file))\n\n # in case environments.txt files aren't complete, also add all known conda environments in\n # all envs_dirs\n envs_dirs = (envs_dir for envs_dir in context.envs_dirs if isdir(envs_dir))\n all_env_paths.update(path for path in (\n join(envs_dir, name) for envs_dir in envs_dirs for name in listdir(envs_dir)\n ) if path not in all_env_paths and is_conda_environment(path))\n\n all_env_paths.add(context.root_prefix)\n return sorted(all_env_paths)\n\n\ndef env_name(prefix):\n if not prefix:\n return None\n if paths_equal(prefix, context.root_prefix):\n return ROOT_ENV_NAME\n maybe_envs_dir, maybe_name = path_split(prefix)\n for envs_dir in context.envs_dirs:\n if paths_equal(envs_dir, maybe_envs_dir):\n return maybe_name\n return prefix\n\n\ndef _clean_environments_txt(environments_txt_file, remove_location=None):\n if not isfile(environments_txt_file):\n return ()\n\n if remove_location:\n remove_location = normpath(remove_location)\n environments_txt_lines = tuple(yield_lines(environments_txt_file))\n environments_txt_lines_cleaned = tuple(\n prefix for prefix in environments_txt_lines\n if prefix != remove_location and is_conda_environment(prefix)\n )\n if environments_txt_lines_cleaned != environments_txt_lines:\n _rewrite_environments_txt(environments_txt_file, environments_txt_lines_cleaned)\n return environments_txt_lines_cleaned\n\n\ndef _rewrite_environments_txt(environments_txt_file, prefixes):\n try:\n with open(environments_txt_file, 'w') as fh:\n fh.write('\\n'.join(prefixes))\n fh.write('\\n')\n except EnvironmentError as e:\n log.info(\"File not cleaned: %s\", 
environments_txt_file)\n log.debug('%r', e, exc_info=True)\n", "path": "conda/core/envs_manager.py"}]}
| 1,718 | 390 |
gh_patches_debug_17939
|
rasdani/github-patches
|
git_diff
|
GeotrekCE__Geotrek-admin-1030
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Pragma no cache for path graph json
As in https://github.com/makinacorpus/django-mapentity/pull/48
Related https://github.com/makinacorpus/Geotrek/issues/1026
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `geotrek/core/views.py`
Content:
```
1 # -*- coding: utf-8 -*-
2 import json
3 from django.http import HttpResponse
4 from django.utils.decorators import method_decorator
5 from django.contrib.auth.decorators import login_required
6 from django.views.decorators.http import last_modified as cache_last_modified
7 from django.views.generic.edit import BaseDetailView
8 from django.core.cache import get_cache
9 from django.shortcuts import redirect
10
11 from mapentity.views import (MapEntityLayer, MapEntityList, MapEntityJsonList,
12 MapEntityDetail, MapEntityDocument, MapEntityCreate, MapEntityUpdate,
13 MapEntityDelete, MapEntityFormat,
14 JSONResponseMixin, HttpJSONResponse, LastModifiedMixin)
15
16 from geotrek.authent.decorators import path_manager_required, same_structure_required
17
18 from .models import Path, Trail
19 from .forms import PathForm
20 from .filters import PathFilter
21 from . import graph as graph_lib
22
23
24 @login_required
25 def last_list(request):
26 last = request.session.get('last_list') # set in MapEntityList
27 if not last:
28 return redirect('core:path_list')
29 return redirect(last)
30
31 home = last_list
32
33
34 class HttpSVGResponse(HttpResponse):
35 content_type = 'image/svg+xml'
36 def __init__(self, content='', **kwargs):
37 kwargs['content_type'] = self.content_type
38 super(HttpSVGResponse, self).__init__(content, **kwargs)
39
40
41 class ElevationChart(LastModifiedMixin, BaseDetailView):
42
43 @method_decorator(login_required)
44 def dispatch(self, *args, **kwargs):
45 return super(ElevationChart, self).dispatch(*args, **kwargs)
46
47 def render_to_response(self, context, **response_kwargs):
48 return HttpSVGResponse(self.get_object().get_elevation_profile_svg(),
49 **response_kwargs)
50
51
52 class ElevationProfile(LastModifiedMixin, JSONResponseMixin, BaseDetailView):
53 """Extract elevation profile from a path and return it as JSON"""
54
55 @method_decorator(login_required)
56 def dispatch(self, *args, **kwargs):
57 return super(ElevationProfile, self).dispatch(*args, **kwargs)
58
59 def get_context_data(self, **kwargs):
60 """
61 Put elevation profile into response context.
62 """
63 obj = self.get_object()
64 data = {}
65 # Formatted as distance, elevation, [lng, lat]
66 for step in obj.get_elevation_profile():
67 formatted = step[0], step[3], step[1:3]
68 data.setdefault('profile', []).append(formatted)
69 return data
70
71
72 class ElevationArea(LastModifiedMixin, JSONResponseMixin, BaseDetailView):
73 """Extract elevation profile on an area and return it as JSON"""
74
75 @method_decorator(login_required)
76 def dispatch(self, *args, **kwargs):
77 return super(ElevationArea, self).dispatch(*args, **kwargs)
78
79 def get_context_data(self, **kwargs):
80 obj = self.get_object()
81 return obj.get_elevation_area()
82
83
84 class PathLayer(MapEntityLayer):
85 model = Path
86 properties = ['name']
87
88
89 class PathList(MapEntityList):
90 queryset = Path.objects.prefetch_related('networks').select_related('stake', 'trail')
91 filterform = PathFilter
92 columns = ['id', 'name', 'networks', 'stake', 'trail']
93
94
95 class PathJsonList(MapEntityJsonList, PathList):
96 pass
97
98
99 class PathFormatList(MapEntityFormat, PathList):
100 pass
101
102
103 class PathDetail(MapEntityDetail):
104 model = Path
105
106 def can_edit(self):
107 return self.request.user.is_superuser or \
108 (hasattr(self.request.user, 'profile') and \
109 self.request.user.profile.is_path_manager and \
110 self.get_object().same_structure(self.request.user))
111
112
113 class PathDocument(MapEntityDocument):
114 model = Path
115
116 def get_context_data(self, *args, **kwargs):
117 self.get_object().prepare_elevation_chart(self.request)
118 return super(PathDocument, self).get_context_data(*args, **kwargs)
119
120
121 class PathCreate(MapEntityCreate):
122 model = Path
123 form_class = PathForm
124
125 @method_decorator(path_manager_required('core:path_list'))
126 def dispatch(self, *args, **kwargs):
127 return super(PathCreate, self).dispatch(*args, **kwargs)
128
129
130 class PathUpdate(MapEntityUpdate):
131 model = Path
132 form_class = PathForm
133
134 @method_decorator(path_manager_required('core:path_detail'))
135 @same_structure_required('core:path_detail')
136 def dispatch(self, *args, **kwargs):
137 return super(PathUpdate, self).dispatch(*args, **kwargs)
138
139
140 class PathDelete(MapEntityDelete):
141 model = Path
142
143 @method_decorator(path_manager_required('core:path_detail'))
144 @same_structure_required('core:path_detail')
145 def dispatch(self, *args, **kwargs):
146 return super(PathDelete, self).dispatch(*args, **kwargs)
147
148
149 @login_required
150 @cache_last_modified(lambda x: Path.latest_updated())
151 def get_graph_json(request):
152 cache = get_cache('fat')
153 key = 'path_graph_json'
154
155 result = cache.get(key)
156 latest = Path.latest_updated()
157
158 if result and latest:
159 cache_latest, json_graph = result
160 # Not empty and still valid
161 if cache_latest and cache_latest >= latest:
162 return HttpJSONResponse(json_graph)
163
164 # cache does not exist or is not up to date
165 # rebuild the graph and cache the json
166 graph = graph_lib.graph_edges_nodes_of_qs(Path.objects.all())
167 json_graph = json.dumps(graph)
168
169 cache.set(key, (latest, json_graph))
170 return HttpJSONResponse(json_graph)
171
172
173 class TrailDetail(MapEntityDetail):
174 model = Trail
175
176 def can_edit(self):
177 return False
178
179
180 class TrailDocument(MapEntityDocument):
181 model = Trail
182
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/geotrek/core/views.py b/geotrek/core/views.py
--- a/geotrek/core/views.py
+++ b/geotrek/core/views.py
@@ -4,6 +4,7 @@
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import last_modified as cache_last_modified
+from django.views.decorators.cache import never_cache as force_cache_validation
from django.views.generic.edit import BaseDetailView
from django.core.cache import get_cache
from django.shortcuts import redirect
@@ -148,6 +149,7 @@
@login_required
@cache_last_modified(lambda x: Path.latest_updated())
+@force_cache_validation
def get_graph_json(request):
cache = get_cache('fat')
     key = 'path_graph_json'
```
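The patch stacks Django's `never_cache` decorator (imported as `force_cache_validation`) under the existing `last_modified` check. A minimal, hypothetical view — not Geotrek's actual code — showing the same combination; in the Django versions of that era `never_cache` emits `Cache-Control: max-age=0`, so clients must revalidate on every request while `last_modified` can still answer unchanged data with `304 Not Modified`:

```python
import datetime

from django.http import HttpResponse
from django.views.decorators.cache import never_cache as force_cache_validation
from django.views.decorators.http import last_modified


def _graph_updated_at(request):
    # Stand-in for Path.latest_updated(): timestamp of the last data change.
    return datetime.datetime(2014, 1, 1)


@last_modified(_graph_updated_at)
@force_cache_validation
def graph_json(request):
    # Expensive payload: only rebuilt/transferred when the client's copy is out of date.
    return HttpResponse('{"nodes": [], "edges": []}', content_type="application/json")
```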
|
{"golden_diff": "diff --git a/geotrek/core/views.py b/geotrek/core/views.py\n--- a/geotrek/core/views.py\n+++ b/geotrek/core/views.py\n@@ -4,6 +4,7 @@\n from django.utils.decorators import method_decorator\n from django.contrib.auth.decorators import login_required\n from django.views.decorators.http import last_modified as cache_last_modified\n+from django.views.decorators.cache import never_cache as force_cache_validation\n from django.views.generic.edit import BaseDetailView\n from django.core.cache import get_cache\n from django.shortcuts import redirect\n@@ -148,6 +149,7 @@\n \n @login_required\n @cache_last_modified(lambda x: Path.latest_updated())\n+@force_cache_validation\n def get_graph_json(request):\n cache = get_cache('fat')\n key = 'path_graph_json'\n", "issue": "Pragma no cache for path graph json\nAs in https://github.com/makinacorpus/django-mapentity/pull/48\n\nRelated https://github.com/makinacorpus/Geotrek/issues/1026\n\n", "before_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom django.http import HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import last_modified as cache_last_modified\nfrom django.views.generic.edit import BaseDetailView\nfrom django.core.cache import get_cache\nfrom django.shortcuts import redirect\n\nfrom mapentity.views import (MapEntityLayer, MapEntityList, MapEntityJsonList,\n MapEntityDetail, MapEntityDocument, MapEntityCreate, MapEntityUpdate,\n MapEntityDelete, MapEntityFormat,\n JSONResponseMixin, HttpJSONResponse, LastModifiedMixin)\n\nfrom geotrek.authent.decorators import path_manager_required, same_structure_required\n\nfrom .models import Path, Trail\nfrom .forms import PathForm\nfrom .filters import PathFilter\nfrom . 
import graph as graph_lib\n\n\n@login_required\ndef last_list(request):\n last = request.session.get('last_list') # set in MapEntityList\n if not last:\n return redirect('core:path_list')\n return redirect(last)\n\nhome = last_list\n\n\nclass HttpSVGResponse(HttpResponse):\n content_type = 'image/svg+xml'\n def __init__(self, content='', **kwargs):\n kwargs['content_type'] = self.content_type\n super(HttpSVGResponse, self).__init__(content, **kwargs)\n\n\nclass ElevationChart(LastModifiedMixin, BaseDetailView):\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ElevationChart, self).dispatch(*args, **kwargs)\n\n def render_to_response(self, context, **response_kwargs):\n return HttpSVGResponse(self.get_object().get_elevation_profile_svg(),\n **response_kwargs)\n\n\nclass ElevationProfile(LastModifiedMixin, JSONResponseMixin, BaseDetailView):\n \"\"\"Extract elevation profile from a path and return it as JSON\"\"\"\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ElevationProfile, self).dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Put elevation profile into response context.\n \"\"\"\n obj = self.get_object()\n data = {}\n # Formatted as distance, elevation, [lng, lat]\n for step in obj.get_elevation_profile():\n formatted = step[0], step[3], step[1:3]\n data.setdefault('profile', []).append(formatted)\n return data\n\n\nclass ElevationArea(LastModifiedMixin, JSONResponseMixin, BaseDetailView):\n \"\"\"Extract elevation profile on an area and return it as JSON\"\"\"\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ElevationArea, self).dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n obj = self.get_object()\n return obj.get_elevation_area()\n\n\nclass PathLayer(MapEntityLayer):\n model = Path\n properties = ['name']\n\n\nclass PathList(MapEntityList):\n queryset = Path.objects.prefetch_related('networks').select_related('stake', 'trail')\n filterform = PathFilter\n columns = ['id', 'name', 'networks', 'stake', 'trail']\n\n\nclass PathJsonList(MapEntityJsonList, PathList):\n pass\n\n\nclass PathFormatList(MapEntityFormat, PathList):\n pass\n\n\nclass PathDetail(MapEntityDetail):\n model = Path\n\n def can_edit(self):\n return self.request.user.is_superuser or \\\n (hasattr(self.request.user, 'profile') and \\\n self.request.user.profile.is_path_manager and \\\n self.get_object().same_structure(self.request.user))\n\n\nclass PathDocument(MapEntityDocument):\n model = Path\n\n def get_context_data(self, *args, **kwargs):\n self.get_object().prepare_elevation_chart(self.request)\n return super(PathDocument, self).get_context_data(*args, **kwargs)\n\n\nclass PathCreate(MapEntityCreate):\n model = Path\n form_class = PathForm\n\n @method_decorator(path_manager_required('core:path_list'))\n def dispatch(self, *args, **kwargs):\n return super(PathCreate, self).dispatch(*args, **kwargs)\n\n\nclass PathUpdate(MapEntityUpdate):\n model = Path\n form_class = PathForm\n\n @method_decorator(path_manager_required('core:path_detail'))\n @same_structure_required('core:path_detail')\n def dispatch(self, *args, **kwargs):\n return super(PathUpdate, self).dispatch(*args, **kwargs)\n\n\nclass PathDelete(MapEntityDelete):\n model = Path\n\n @method_decorator(path_manager_required('core:path_detail'))\n @same_structure_required('core:path_detail')\n def dispatch(self, *args, **kwargs):\n return super(PathDelete, self).dispatch(*args, 
**kwargs)\n\n\n@login_required\n@cache_last_modified(lambda x: Path.latest_updated())\ndef get_graph_json(request):\n cache = get_cache('fat')\n key = 'path_graph_json'\n\n result = cache.get(key)\n latest = Path.latest_updated()\n\n if result and latest:\n cache_latest, json_graph = result\n # Not empty and still valid\n if cache_latest and cache_latest >= latest:\n return HttpJSONResponse(json_graph)\n\n # cache does not exist or is not up to date\n # rebuild the graph and cache the json\n graph = graph_lib.graph_edges_nodes_of_qs(Path.objects.all())\n json_graph = json.dumps(graph)\n\n cache.set(key, (latest, json_graph))\n return HttpJSONResponse(json_graph)\n\n\nclass TrailDetail(MapEntityDetail):\n model = Trail\n\n def can_edit(self):\n return False\n\n\nclass TrailDocument(MapEntityDocument):\n model = Trail\n", "path": "geotrek/core/views.py"}], "after_files": [{"content": "# -*- coding: utf-8 -*-\nimport json\nfrom django.http import HttpResponse\nfrom django.utils.decorators import method_decorator\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.http import last_modified as cache_last_modified\nfrom django.views.decorators.cache import never_cache as force_cache_validation\nfrom django.views.generic.edit import BaseDetailView\nfrom django.core.cache import get_cache\nfrom django.shortcuts import redirect\n\nfrom mapentity.views import (MapEntityLayer, MapEntityList, MapEntityJsonList,\n MapEntityDetail, MapEntityDocument, MapEntityCreate, MapEntityUpdate,\n MapEntityDelete, MapEntityFormat,\n JSONResponseMixin, HttpJSONResponse, LastModifiedMixin)\n\nfrom geotrek.authent.decorators import path_manager_required, same_structure_required\n\nfrom .models import Path, Trail\nfrom .forms import PathForm\nfrom .filters import PathFilter\nfrom . 
import graph as graph_lib\n\n\n@login_required\ndef last_list(request):\n last = request.session.get('last_list') # set in MapEntityList\n if not last:\n return redirect('core:path_list')\n return redirect(last)\n\nhome = last_list\n\n\nclass HttpSVGResponse(HttpResponse):\n content_type = 'image/svg+xml'\n def __init__(self, content='', **kwargs):\n kwargs['content_type'] = self.content_type\n super(HttpSVGResponse, self).__init__(content, **kwargs)\n\n\nclass ElevationChart(LastModifiedMixin, BaseDetailView):\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ElevationChart, self).dispatch(*args, **kwargs)\n\n def render_to_response(self, context, **response_kwargs):\n return HttpSVGResponse(self.get_object().get_elevation_profile_svg(),\n **response_kwargs)\n\n\nclass ElevationProfile(LastModifiedMixin, JSONResponseMixin, BaseDetailView):\n \"\"\"Extract elevation profile from a path and return it as JSON\"\"\"\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ElevationProfile, self).dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n \"\"\"\n Put elevation profile into response context.\n \"\"\"\n obj = self.get_object()\n data = {}\n # Formatted as distance, elevation, [lng, lat]\n for step in obj.get_elevation_profile():\n formatted = step[0], step[3], step[1:3]\n data.setdefault('profile', []).append(formatted)\n return data\n\n\nclass ElevationArea(LastModifiedMixin, JSONResponseMixin, BaseDetailView):\n \"\"\"Extract elevation profile on an area and return it as JSON\"\"\"\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(ElevationArea, self).dispatch(*args, **kwargs)\n\n def get_context_data(self, **kwargs):\n obj = self.get_object()\n return obj.get_elevation_area()\n\n\nclass PathLayer(MapEntityLayer):\n model = Path\n properties = ['name']\n\n\nclass PathList(MapEntityList):\n queryset = Path.objects.prefetch_related('networks').select_related('stake', 'trail')\n filterform = PathFilter\n columns = ['id', 'name', 'networks', 'stake', 'trail']\n\n\nclass PathJsonList(MapEntityJsonList, PathList):\n pass\n\n\nclass PathFormatList(MapEntityFormat, PathList):\n pass\n\n\nclass PathDetail(MapEntityDetail):\n model = Path\n\n def can_edit(self):\n return self.request.user.is_superuser or \\\n (hasattr(self.request.user, 'profile') and \\\n self.request.user.profile.is_path_manager and \\\n self.get_object().same_structure(self.request.user))\n\n\nclass PathDocument(MapEntityDocument):\n model = Path\n\n def get_context_data(self, *args, **kwargs):\n self.get_object().prepare_elevation_chart(self.request)\n return super(PathDocument, self).get_context_data(*args, **kwargs)\n\n\nclass PathCreate(MapEntityCreate):\n model = Path\n form_class = PathForm\n\n @method_decorator(path_manager_required('core:path_list'))\n def dispatch(self, *args, **kwargs):\n return super(PathCreate, self).dispatch(*args, **kwargs)\n\n\nclass PathUpdate(MapEntityUpdate):\n model = Path\n form_class = PathForm\n\n @method_decorator(path_manager_required('core:path_detail'))\n @same_structure_required('core:path_detail')\n def dispatch(self, *args, **kwargs):\n return super(PathUpdate, self).dispatch(*args, **kwargs)\n\n\nclass PathDelete(MapEntityDelete):\n model = Path\n\n @method_decorator(path_manager_required('core:path_detail'))\n @same_structure_required('core:path_detail')\n def dispatch(self, *args, **kwargs):\n return super(PathDelete, self).dispatch(*args, 
**kwargs)\n\n\n@login_required\n@cache_last_modified(lambda x: Path.latest_updated())\n@force_cache_validation\ndef get_graph_json(request):\n cache = get_cache('fat')\n key = 'path_graph_json'\n\n result = cache.get(key)\n latest = Path.latest_updated()\n\n if result and latest:\n cache_latest, json_graph = result\n # Not empty and still valid\n if cache_latest and cache_latest >= latest:\n return HttpJSONResponse(json_graph)\n\n # cache does not exist or is not up to date\n # rebuild the graph and cache the json\n graph = graph_lib.graph_edges_nodes_of_qs(Path.objects.all())\n json_graph = json.dumps(graph)\n\n cache.set(key, (latest, json_graph))\n return HttpJSONResponse(json_graph)\n\n\nclass TrailDetail(MapEntityDetail):\n model = Trail\n\n def can_edit(self):\n return False\n\n\nclass TrailDocument(MapEntityDocument):\n model = Trail\n", "path": "geotrek/core/views.py"}]}
| 1,992 | 171 |
gh_patches_debug_3724
|
rasdani/github-patches
|
git_diff
|
fal-ai__dbt-fal-344
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Make fal scripts __name__ == '__main__'
**Context**
When users test scripts they will normally have a
```py
if __name__ == '__main__':
main()
```
and the `main()` function will be where the whole script lives.
When a script is associated with a model, it is executed "directly". So it could be considered the "__main__" script.
**Describe alternatives you've considered**
Going for the dbt interface of offering a function.
```py
# for models
def model():
pass
```
```py
# for hooks/after/before scripts
def hook():
    pass
```
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `src/fal/fal_script.py`
Content:
```
1 import os
2 import json
3 from typing import Dict, Any, List, Optional, Union
4 from pathlib import Path
5 from functools import partial
6 from dataclasses import dataclass
7 from deprecation import deprecated
8
9 from faldbt.parse import normalize_path
10 from faldbt.project import DbtModel, FalDbt
11 import faldbt.lib as lib
12
13 from dbt.contracts.results import RunStatus
14 from dbt.config.runtime import RuntimeConfig
15 from dbt.logger import GLOBAL_LOGGER as logger
16
17 if lib.DBT_VCURRENT.compare(lib.DBT_V1) >= 0:
18 from dbt.contracts.graph.parsed import ColumnInfo
19 else:
20 from faldbt.cp.contracts.graph.parsed import ColumnInfo
21
22
23 @dataclass
24 class CurrentModel:
25 name: str
26 alias: str
27 status: RunStatus
28 columns: Dict[str, ColumnInfo]
29 tests: List[Any]
30 meta: Dict[Any, Any]
31
32
33 @dataclass
34 class CurrentTest:
35 name: str
36 model_name: str
37 column: str
38 status: str
39
40 @property
41 @deprecated(details="Use 'model_name' instead")
42 def modelname(self):
43 return self.model_name
44
45
46 @dataclass
47 class ContextConfig:
48 target_path: Path
49
50 def __init__(self, config: RuntimeConfig):
51 self.target_path = Path(
52 os.path.realpath(os.path.join(config.project_root, config.target_path))
53 )
54
55
56 @dataclass
57 class Context:
58 current_model: Union[CurrentModel, None]
59 config: ContextConfig
60
61
62 @dataclass(frozen=True, init=False)
63 class FalScript:
64 model: Optional[DbtModel]
65 path: Path
66 _faldbt: FalDbt
67
68 def __init__(self, faldbt: FalDbt, model: Optional[DbtModel], path: str):
69 # Necessary because of frozen=True
70 object.__setattr__(self, "model", model)
71 object.__setattr__(self, "path", normalize_path(faldbt.scripts_dir, path))
72 object.__setattr__(self, "_faldbt", faldbt)
73
74 @classmethod
75 def model_script(cls, faldbt: FalDbt, model: DbtModel):
76 script = FalScript(faldbt, model, "")
77 # HACK: Set the script path specially for this case
78 object.__setattr__(script, "path", model.python_model)
79 return script
80
81 def exec(self, faldbt: FalDbt):
82 """
83 Executes the script
84 """
85 # Enable local imports
86 try:
87 source_code = python_from_file(self.path)
88 program = compile(source_code, self.path, "exec")
89
90 exec_globals = {
91 "context": self._build_script_context(),
92 "ref": faldbt.ref,
93 "source": faldbt.source,
94 "write_to_source": faldbt.write_to_source,
95 "write_to_firestore": faldbt.write_to_firestore,
96 "list_models": faldbt.list_models,
97 "list_models_ids": faldbt.list_models_ids,
98 "list_sources": faldbt.list_sources,
99 "list_features": faldbt.list_features,
100 "el": faldbt.el,
101 }
102
103 if self.model is not None:
104 # Hard-wire the model
105 exec_globals["write_to_model"] = partial(
106 faldbt.write_to_model, target_1=self.model.name, target_2=None
107 )
108
109 exec(program, exec_globals)
110 finally:
111 pass
112
113 @property
114 def id(self):
115 # TODO: maybe `self.path - project_dir`, to show only relevant path
116 return f"({self.model_name},{self.path})"
117
118 @property
119 def is_global(self):
120 return self.model is None
121
122 @property
123 def model_name(self):
124 return "<GLOBAL>" if self.is_global else self.model.name # type: ignore
125
126 def _build_script_context(self):
127 context_config = ContextConfig(self._faldbt._config)
128 if self.is_global:
129 return Context(current_model=None, config=context_config)
130
131 model: DbtModel = self.model # type: ignore
132
133 meta = model.meta
134 _del_key(meta, self._faldbt.keyword)
135
136 tests = _process_tests(model.tests)
137
138 current_model = CurrentModel(
139 name=model.name,
140 alias=model.alias,
141 status=model.status,
142 columns=model.columns,
143 tests=tests,
144 meta=meta,
145 )
146
147 return Context(current_model=current_model, config=context_config)
148
149
150 def _del_key(dict: Dict[str, Any], key: str):
151 try:
152 del dict[key]
153 except KeyError:
154 pass
155
156
157 def _process_tests(tests: List[Any]):
158 return list(
159 map(
160 lambda test: CurrentTest(
161 name=test.name,
162 column=test.column,
163 status=test.status,
164 model_name=test.model,
165 ),
166 tests,
167 )
168 )
169
170
171 def python_from_file(path: Path) -> str:
172 with open(path) as file:
173 raw_source_code = file.read()
174 if path.suffix == ".ipynb":
175 raw_source_code = _process_ipynb(raw_source_code)
176 return raw_source_code
177
178
179 def _process_ipynb(raw_source_code: str) -> str:
180 def strip_magic(source: List[str]) -> List[str]:
181 NOTEBOOK_LIB = "faldbt.magics"
182 return [item for item in source if item[0] != "%" and NOTEBOOK_LIB not in item]
183
184 ipynb_struct = json.loads(raw_source_code)
185
186 script_list = []
187 for cell in ipynb_struct["cells"]:
188 if cell["cell_type"] == "code":
189 source = strip_magic(cell["source"])
190 script_list.append("".join(source))
191
192 joined_script = "\n #cell \n".join(script_list)
193
194 logger.debug(f"Joined .ipynb cells to:\n{joined_script}")
195
196 return joined_script
197
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
```diff
diff --git a/src/fal/fal_script.py b/src/fal/fal_script.py
--- a/src/fal/fal_script.py
+++ b/src/fal/fal_script.py
@@ -88,6 +88,7 @@
program = compile(source_code, self.path, "exec")
exec_globals = {
+ "__name__": "__main__",
"context": self._build_script_context(),
"ref": faldbt.ref,
"source": faldbt.source,
|
{"golden_diff": "diff --git a/src/fal/fal_script.py b/src/fal/fal_script.py\n--- a/src/fal/fal_script.py\n+++ b/src/fal/fal_script.py\n@@ -88,6 +88,7 @@\n program = compile(source_code, self.path, \"exec\")\n \n exec_globals = {\n+ \"__name__\": \"__main__\",\n \"context\": self._build_script_context(),\n \"ref\": faldbt.ref,\n \"source\": faldbt.source,\n", "issue": "Make fal scripts __name__ == '__main__'\n**Context**\r\nWhen users test scripts they will normally have a \r\n\r\n```py\r\nif __name__ == '__main__':\r\n main()\r\n```\r\n\r\nand the `main()` function will be where the whole script lives.\r\n\r\nWhen a script is associated with a model, it is executed \"directly\". So it could be considered the \"__main__\" script.\r\n\r\n**Describe alternatives you've considered**\r\nGoing for the dbt interface of offering a function.\r\n\r\n```py\r\n# for models\r\ndef model():\r\n pass\r\n```\r\n\r\n```py\r\n# for hooks/after/before scripts\r\ndef hook():\r\n pass\r\n\n", "before_files": [{"content": "import os\nimport json\nfrom typing import Dict, Any, List, Optional, Union\nfrom pathlib import Path\nfrom functools import partial\nfrom dataclasses import dataclass\nfrom deprecation import deprecated\n\nfrom faldbt.parse import normalize_path\nfrom faldbt.project import DbtModel, FalDbt\nimport faldbt.lib as lib\n\nfrom dbt.contracts.results import RunStatus\nfrom dbt.config.runtime import RuntimeConfig\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\nif lib.DBT_VCURRENT.compare(lib.DBT_V1) >= 0:\n from dbt.contracts.graph.parsed import ColumnInfo\nelse:\n from faldbt.cp.contracts.graph.parsed import ColumnInfo\n\n\n@dataclass\nclass CurrentModel:\n name: str\n alias: str\n status: RunStatus\n columns: Dict[str, ColumnInfo]\n tests: List[Any]\n meta: Dict[Any, Any]\n\n\n@dataclass\nclass CurrentTest:\n name: str\n model_name: str\n column: str\n status: str\n\n @property\n @deprecated(details=\"Use 'model_name' instead\")\n def modelname(self):\n return self.model_name\n\n\n@dataclass\nclass ContextConfig:\n target_path: Path\n\n def __init__(self, config: RuntimeConfig):\n self.target_path = Path(\n os.path.realpath(os.path.join(config.project_root, config.target_path))\n )\n\n\n@dataclass\nclass Context:\n current_model: Union[CurrentModel, None]\n config: ContextConfig\n\n\n@dataclass(frozen=True, init=False)\nclass FalScript:\n model: Optional[DbtModel]\n path: Path\n _faldbt: FalDbt\n\n def __init__(self, faldbt: FalDbt, model: Optional[DbtModel], path: str):\n # Necessary because of frozen=True\n object.__setattr__(self, \"model\", model)\n object.__setattr__(self, \"path\", normalize_path(faldbt.scripts_dir, path))\n object.__setattr__(self, \"_faldbt\", faldbt)\n\n @classmethod\n def model_script(cls, faldbt: FalDbt, model: DbtModel):\n script = FalScript(faldbt, model, \"\")\n # HACK: Set the script path specially for this case\n object.__setattr__(script, \"path\", model.python_model)\n return script\n\n def exec(self, faldbt: FalDbt):\n \"\"\"\n Executes the script\n \"\"\"\n # Enable local imports\n try:\n source_code = python_from_file(self.path)\n program = compile(source_code, self.path, \"exec\")\n\n exec_globals = {\n \"context\": self._build_script_context(),\n \"ref\": faldbt.ref,\n \"source\": faldbt.source,\n \"write_to_source\": faldbt.write_to_source,\n \"write_to_firestore\": faldbt.write_to_firestore,\n \"list_models\": faldbt.list_models,\n \"list_models_ids\": faldbt.list_models_ids,\n \"list_sources\": faldbt.list_sources,\n \"list_features\": 
faldbt.list_features,\n \"el\": faldbt.el,\n }\n\n if self.model is not None:\n # Hard-wire the model\n exec_globals[\"write_to_model\"] = partial(\n faldbt.write_to_model, target_1=self.model.name, target_2=None\n )\n\n exec(program, exec_globals)\n finally:\n pass\n\n @property\n def id(self):\n # TODO: maybe `self.path - project_dir`, to show only relevant path\n return f\"({self.model_name},{self.path})\"\n\n @property\n def is_global(self):\n return self.model is None\n\n @property\n def model_name(self):\n return \"<GLOBAL>\" if self.is_global else self.model.name # type: ignore\n\n def _build_script_context(self):\n context_config = ContextConfig(self._faldbt._config)\n if self.is_global:\n return Context(current_model=None, config=context_config)\n\n model: DbtModel = self.model # type: ignore\n\n meta = model.meta\n _del_key(meta, self._faldbt.keyword)\n\n tests = _process_tests(model.tests)\n\n current_model = CurrentModel(\n name=model.name,\n alias=model.alias,\n status=model.status,\n columns=model.columns,\n tests=tests,\n meta=meta,\n )\n\n return Context(current_model=current_model, config=context_config)\n\n\ndef _del_key(dict: Dict[str, Any], key: str):\n try:\n del dict[key]\n except KeyError:\n pass\n\n\ndef _process_tests(tests: List[Any]):\n return list(\n map(\n lambda test: CurrentTest(\n name=test.name,\n column=test.column,\n status=test.status,\n model_name=test.model,\n ),\n tests,\n )\n )\n\n\ndef python_from_file(path: Path) -> str:\n with open(path) as file:\n raw_source_code = file.read()\n if path.suffix == \".ipynb\":\n raw_source_code = _process_ipynb(raw_source_code)\n return raw_source_code\n\n\ndef _process_ipynb(raw_source_code: str) -> str:\n def strip_magic(source: List[str]) -> List[str]:\n NOTEBOOK_LIB = \"faldbt.magics\"\n return [item for item in source if item[0] != \"%\" and NOTEBOOK_LIB not in item]\n\n ipynb_struct = json.loads(raw_source_code)\n\n script_list = []\n for cell in ipynb_struct[\"cells\"]:\n if cell[\"cell_type\"] == \"code\":\n source = strip_magic(cell[\"source\"])\n script_list.append(\"\".join(source))\n\n joined_script = \"\\n #cell \\n\".join(script_list)\n\n logger.debug(f\"Joined .ipynb cells to:\\n{joined_script}\")\n\n return joined_script\n", "path": "src/fal/fal_script.py"}], "after_files": [{"content": "import os\nimport json\nfrom typing import Dict, Any, List, Optional, Union\nfrom pathlib import Path\nfrom functools import partial\nfrom dataclasses import dataclass\nfrom deprecation import deprecated\n\nfrom faldbt.parse import normalize_path\nfrom faldbt.project import DbtModel, FalDbt\nimport faldbt.lib as lib\n\nfrom dbt.contracts.results import RunStatus\nfrom dbt.config.runtime import RuntimeConfig\nfrom dbt.logger import GLOBAL_LOGGER as logger\n\nif lib.DBT_VCURRENT.compare(lib.DBT_V1) >= 0:\n from dbt.contracts.graph.parsed import ColumnInfo\nelse:\n from faldbt.cp.contracts.graph.parsed import ColumnInfo\n\n\n@dataclass\nclass CurrentModel:\n name: str\n alias: str\n status: RunStatus\n columns: Dict[str, ColumnInfo]\n tests: List[Any]\n meta: Dict[Any, Any]\n\n\n@dataclass\nclass CurrentTest:\n name: str\n model_name: str\n column: str\n status: str\n\n @property\n @deprecated(details=\"Use 'model_name' instead\")\n def modelname(self):\n return self.model_name\n\n\n@dataclass\nclass ContextConfig:\n target_path: Path\n\n def __init__(self, config: RuntimeConfig):\n self.target_path = Path(\n os.path.realpath(os.path.join(config.project_root, config.target_path))\n )\n\n\n@dataclass\nclass Context:\n 
current_model: Union[CurrentModel, None]\n config: ContextConfig\n\n\n@dataclass(frozen=True, init=False)\nclass FalScript:\n model: Optional[DbtModel]\n path: Path\n _faldbt: FalDbt\n\n def __init__(self, faldbt: FalDbt, model: Optional[DbtModel], path: str):\n # Necessary because of frozen=True\n object.__setattr__(self, \"model\", model)\n object.__setattr__(self, \"path\", normalize_path(faldbt.scripts_dir, path))\n object.__setattr__(self, \"_faldbt\", faldbt)\n\n @classmethod\n def model_script(cls, faldbt: FalDbt, model: DbtModel):\n script = FalScript(faldbt, model, \"\")\n # HACK: Set the script path specially for this case\n object.__setattr__(script, \"path\", model.python_model)\n return script\n\n def exec(self, faldbt: FalDbt):\n \"\"\"\n Executes the script\n \"\"\"\n # Enable local imports\n try:\n source_code = python_from_file(self.path)\n program = compile(source_code, self.path, \"exec\")\n\n exec_globals = {\n \"__name__\": \"__main__\",\n \"context\": self._build_script_context(),\n \"ref\": faldbt.ref,\n \"source\": faldbt.source,\n \"write_to_source\": faldbt.write_to_source,\n \"write_to_firestore\": faldbt.write_to_firestore,\n \"list_models\": faldbt.list_models,\n \"list_models_ids\": faldbt.list_models_ids,\n \"list_sources\": faldbt.list_sources,\n \"list_features\": faldbt.list_features,\n \"el\": faldbt.el,\n }\n\n if self.model is not None:\n # Hard-wire the model\n exec_globals[\"write_to_model\"] = partial(\n faldbt.write_to_model, target_1=self.model.name, target_2=None\n )\n\n exec(program, exec_globals)\n finally:\n pass\n\n @property\n def id(self):\n # TODO: maybe `self.path - project_dir`, to show only relevant path\n return f\"({self.model_name},{self.path})\"\n\n @property\n def is_global(self):\n return self.model is None\n\n @property\n def model_name(self):\n return \"<GLOBAL>\" if self.is_global else self.model.name # type: ignore\n\n def _build_script_context(self):\n context_config = ContextConfig(self._faldbt._config)\n if self.is_global:\n return Context(current_model=None, config=context_config)\n\n model: DbtModel = self.model # type: ignore\n\n meta = model.meta\n _del_key(meta, self._faldbt.keyword)\n\n tests = _process_tests(model.tests)\n\n current_model = CurrentModel(\n name=model.name,\n alias=model.alias,\n status=model.status,\n columns=model.columns,\n tests=tests,\n meta=meta,\n )\n\n return Context(current_model=current_model, config=context_config)\n\n\ndef _del_key(dict: Dict[str, Any], key: str):\n try:\n del dict[key]\n except KeyError:\n pass\n\n\ndef _process_tests(tests: List[Any]):\n return list(\n map(\n lambda test: CurrentTest(\n name=test.name,\n column=test.column,\n status=test.status,\n model_name=test.model,\n ),\n tests,\n )\n )\n\n\ndef python_from_file(path: Path) -> str:\n with open(path) as file:\n raw_source_code = file.read()\n if path.suffix == \".ipynb\":\n raw_source_code = _process_ipynb(raw_source_code)\n return raw_source_code\n\n\ndef _process_ipynb(raw_source_code: str) -> str:\n def strip_magic(source: List[str]) -> List[str]:\n NOTEBOOK_LIB = \"faldbt.magics\"\n return [item for item in source if item[0] != \"%\" and NOTEBOOK_LIB not in item]\n\n ipynb_struct = json.loads(raw_source_code)\n\n script_list = []\n for cell in ipynb_struct[\"cells\"]:\n if cell[\"cell_type\"] == \"code\":\n source = strip_magic(cell[\"source\"])\n script_list.append(\"\".join(source))\n\n joined_script = \"\\n #cell \\n\".join(script_list)\n\n logger.debug(f\"Joined .ipynb cells to:\\n{joined_script}\")\n\n return 
joined_script\n", "path": "src/fal/fal_script.py"}]}
| 2,189 | 109 |
gh_patches_debug_35879
|
rasdani/github-patches
|
git_diff
|
iterative__dvc-1437
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Image is not supported as a dependency
Version: `0.22.0+f9b30a`
I ran this command:
```
dvc run -d 2018-12-12-13:32:56.png -o derp 'echo derp > derp'
```
Where the dependency is:
```
2018-12-12-13:32:56.png: PNG image data, 1228 x 494, 8-bit/color RGBA, non-interlaced
```
And I got this:
```
Error: Failed to run command: Dependency '2018-12-12-13:32:56.png' is not supported
```
Is this intended? Why aren't we supporting images as dependencies?
--- END ISSUE ---
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `dvc/output/__init__.py`
Content:
```
1 import schema
2
3 from dvc.exceptions import DvcException
4 from dvc.config import Config
5
6 from dvc.dependency import SCHEMA, urlparse
7 from dvc.dependency.base import DependencyBase
8 from dvc.output.s3 import OutputS3
9 from dvc.output.gs import OutputGS
10 from dvc.output.local import OutputLOCAL
11 from dvc.output.hdfs import OutputHDFS
12 from dvc.output.ssh import OutputSSH
13
14 from dvc.remote import Remote
15
16
17 OUTS = [OutputHDFS, OutputS3, OutputGS, OutputSSH, OutputLOCAL]
18
19 OUTS_MAP = {'hdfs': OutputHDFS,
20 's3': OutputS3,
21 'gs': OutputGS,
22 'ssh': OutputSSH,
23 '': OutputLOCAL}
24
25 SCHEMA[schema.Optional(OutputLOCAL.PARAM_CACHE)] = bool
26 SCHEMA[schema.Optional(OutputLOCAL.PARAM_METRIC)] = OutputLOCAL.METRIC_SCHEMA
27
28
29 def _get(stage, p, info, cache, metric):
30 parsed = urlparse(p)
31 if parsed.scheme == 'remote':
32 name = Config.SECTION_REMOTE_FMT.format(parsed.netloc)
33 sect = stage.project.config._config[name]
34 remote = Remote(stage.project, sect)
35 return OUTS_MAP[remote.scheme](stage,
36 p,
37 info,
38 cache=cache,
39 remote=remote,
40 metric=metric)
41
42 for o in OUTS:
43 if o.supported(p):
44 return o(stage, p, info, cache=cache, remote=None, metric=metric)
45 raise DvcException('Output \'{}\' is not supported'.format(p))
46
47
48 def loadd_from(stage, d_list):
49 ret = []
50 for d in d_list:
51 p = d.pop(DependencyBase.PARAM_PATH)
52 cache = d.pop(OutputLOCAL.PARAM_CACHE, True)
53 metric = d.pop(OutputLOCAL.PARAM_METRIC, False)
54 ret.append(_get(stage, p, info=d, cache=cache, metric=metric))
55 return ret
56
57
58 def loads_from(stage, s_list, use_cache=True, metric=False):
59 ret = []
60 for s in s_list:
61 ret.append(_get(stage, s, info={}, cache=use_cache, metric=metric))
62 return ret
63
```
Path: `dvc/dependency/__init__.py`
Content:
```
1 import schema
2
3 try:
4 from urlparse import urlparse
5 except ImportError:
6 from urllib.parse import urlparse
7
8 from dvc.exceptions import DvcException
9 from dvc.config import Config
10
11 from dvc.dependency.base import DependencyBase
12 from dvc.dependency.s3 import DependencyS3
13 from dvc.dependency.gs import DependencyGS
14 from dvc.dependency.local import DependencyLOCAL
15 from dvc.dependency.hdfs import DependencyHDFS
16 from dvc.dependency.ssh import DependencySSH
17 from dvc.dependency.http import DependencyHTTP
18
19 from dvc.remote import Remote
20 from dvc.remote.local import RemoteLOCAL
21 from dvc.remote.s3 import RemoteS3
22 from dvc.remote.hdfs import RemoteHDFS
23
24 DEPS = [
25 DependencyGS,
26 DependencyHDFS,
27 DependencyHTTP,
28 DependencyLOCAL,
29 DependencyS3,
30 DependencySSH,
31 ]
32
33 DEP_MAP = {
34 '': DependencyLOCAL,
35 'ssh': DependencySSH,
36 's3': DependencyS3,
37 'gs': DependencyGS,
38 'hdfs': DependencyHDFS,
39 'http': DependencyHTTP,
40 'https': DependencyHTTP,
41 }
42
43 # We are skipping RemoteHTTP.PARAM_ETAG because is the same as RemoteS3
44 SCHEMA = {
45 DependencyBase.PARAM_PATH: str,
46 schema.Optional(RemoteLOCAL.PARAM_MD5): schema.Or(str, None),
47 schema.Optional(RemoteS3.PARAM_ETAG): schema.Or(str, None),
48 schema.Optional(RemoteHDFS.PARAM_CHECKSUM): schema.Or(str, None),
49 }
50
51
52 def _get(stage, p, info):
53 parsed = urlparse(p)
54 if parsed.scheme == 'remote':
55 name = Config.SECTION_REMOTE_FMT.format(parsed.netloc)
56 sect = stage.project.config._config[name]
57 remote = Remote(stage.project, sect)
58 return DEP_MAP[remote.scheme](stage, p, info, remote=remote)
59
60 for d in DEPS:
61 if d.supported(p):
62 return d(stage, p, info)
63 raise DvcException('Dependency \'{}\' is not supported'.format(p))
64
65
66 def loadd_from(stage, d_list):
67 ret = []
68 for d in d_list:
69 p = d.pop(DependencyBase.PARAM_PATH)
70 ret.append(_get(stage, p, d))
71 return ret
72
73
74 def loads_from(stage, s_list):
75 ret = []
76 for s in s_list:
77 ret.append(_get(stage, s, {}))
78 return ret
79
```
Path: `dvc/dependency/local.py`
Content:
```
1 import os
2
3 try:
4 from urlparse import urlparse
5 except ImportError:
6 from urllib.parse import urlparse
7
8 from dvc.dependency.base import DependencyBase
9 from dvc.dependency.base import DependencyDoesNotExistError
10 from dvc.dependency.base import DependencyIsNotFileOrDirError
11 from dvc.remote.local import RemoteLOCAL
12
13
14 class DependencyLOCAL(DependencyBase):
15 REGEX = r'^(?P<path>(/+|.:\\+)?[^:]*)$'
16
17 DoesNotExistError = DependencyDoesNotExistError
18 IsNotFileOrDirError = DependencyIsNotFileOrDirError
19
20 def __init__(self, stage, path, info=None, remote=None):
21 self.stage = stage
22 self.project = stage.project
23 self.info = info
24 if remote is not None:
25 self.remote = remote
26 else:
27 self.remote = RemoteLOCAL(stage.project, {})
28
29 if remote:
30 path = os.path.join(remote.prefix, urlparse(path).path.lstrip('/'))
31
32 if not os.path.isabs(path):
33 path = self.remote.ospath(path)
34 path = os.path.join(stage.cwd, path)
35 self.path = os.path.abspath(os.path.normpath(path))
36
37 self.path_info = {'scheme': 'local',
38 'path': self.path}
39
40 def __str__(self):
41 return self.rel_path
42
43 @property
44 def is_local(self):
45 assert os.path.isabs(self.path)
46 assert os.path.isabs(self.project.root_dir)
47 return self.path.startswith(self.project.root_dir)
48
49 @property
50 def sep(self):
51 return os.sep
52
53 @property
54 def rel_path(self):
55 return os.path.relpath(self.path)
56
57 def changed(self):
58 if not self.exists:
59 return True
60
61 info = self.remote.save_info(self.path_info)
62
63 return self.info != info
64
65 def save(self):
66 if not self.exists:
67 raise self.DoesNotExistError(self.rel_path)
68
69 if not os.path.isfile(self.path) \
70 and not os.path.isdir(self.path): # pragma: no cover
71 raise self.IsNotFileOrDirError(self.rel_path)
72
73 if (os.path.isfile(self.path) and os.path.getsize(self.path) == 0) or \
74 (os.path.isdir(self.path) and len(os.listdir(self.path)) == 0):
75 msg = "File/directory '{}' is empty.".format(self.rel_path)
76 self.project.logger.warn(msg)
77
78 self.info = self.remote.save_info(self.path_info)
79
80 def dumpd(self):
81 if self.is_local:
82 path = self.remote.unixpath(os.path.relpath(self.path,
83 self.stage.cwd))
84 else:
85 path = self.path
86
87 info = self.info.copy()
88 info[self.PARAM_PATH] = path
89 return info
90
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
```diff
diff --git a/dvc/dependency/__init__.py b/dvc/dependency/__init__.py
--- a/dvc/dependency/__init__.py
+++ b/dvc/dependency/__init__.py
@@ -5,7 +5,6 @@
except ImportError:
from urllib.parse import urlparse
-from dvc.exceptions import DvcException
from dvc.config import Config
from dvc.dependency.base import DependencyBase
@@ -25,9 +24,9 @@
DependencyGS,
DependencyHDFS,
DependencyHTTP,
- DependencyLOCAL,
DependencyS3,
DependencySSH,
+ # NOTE: DependencyLOCAL is the default choice
]
DEP_MAP = {
@@ -60,7 +59,7 @@
for d in DEPS:
if d.supported(p):
return d(stage, p, info)
- raise DvcException('Dependency \'{}\' is not supported'.format(p))
+ return DependencyLOCAL(stage, p, info)
def loadd_from(stage, d_list):
diff --git a/dvc/dependency/local.py b/dvc/dependency/local.py
--- a/dvc/dependency/local.py
+++ b/dvc/dependency/local.py
@@ -12,7 +12,7 @@
class DependencyLOCAL(DependencyBase):
- REGEX = r'^(?P<path>(/+|.:\\+)?[^:]*)$'
+ REGEX = r'^(?P<path>.*)$'
DoesNotExistError = DependencyDoesNotExistError
IsNotFileOrDirError = DependencyIsNotFileOrDirError
diff --git a/dvc/output/__init__.py b/dvc/output/__init__.py
--- a/dvc/output/__init__.py
+++ b/dvc/output/__init__.py
@@ -1,6 +1,5 @@
import schema
-from dvc.exceptions import DvcException
from dvc.config import Config
from dvc.dependency import SCHEMA, urlparse
@@ -14,7 +13,13 @@
from dvc.remote import Remote
-OUTS = [OutputHDFS, OutputS3, OutputGS, OutputSSH, OutputLOCAL]
+OUTS = [
+ OutputHDFS,
+ OutputS3,
+ OutputGS,
+ OutputSSH,
+ # NOTE: OutputLOCAL is the default choice
+]
OUTS_MAP = {'hdfs': OutputHDFS,
's3': OutputS3,
@@ -42,7 +47,7 @@
for o in OUTS:
if o.supported(p):
return o(stage, p, info, cache=cache, remote=None, metric=metric)
- raise DvcException('Output \'{}\' is not supported'.format(p))
+ return OutputLOCAL(stage, p, info, cache=cache, remote=None, metric=metric)
 def loadd_from(stage, d_list):
```
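The error in the issue comes from `DependencyLOCAL.REGEX` rejecting the colons in the timestamped file name; assuming `supported()` simply matches the candidate path against `REGEX` (the base class is not shown here), the before/after difference is easy to reproduce:

```python
import re

OLD_REGEX = r'^(?P<path>(/+|.:\\+)?[^:]*)$'   # DependencyLOCAL.REGEX before the patch
NEW_REGEX = r'^(?P<path>.*)$'                 # DependencyLOCAL.REGEX after the patch

path = "2018-12-12-13:32:56.png"
print(bool(re.match(OLD_REGEX, path)))   # False -> "Dependency '...' is not supported"
print(bool(re.match(NEW_REGEX, path)))   # True  -> any local path is accepted as LOCAL
```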
|
{"golden_diff": "diff --git a/dvc/dependency/__init__.py b/dvc/dependency/__init__.py\n--- a/dvc/dependency/__init__.py\n+++ b/dvc/dependency/__init__.py\n@@ -5,7 +5,6 @@\n except ImportError:\n from urllib.parse import urlparse\n \n-from dvc.exceptions import DvcException\n from dvc.config import Config\n \n from dvc.dependency.base import DependencyBase\n@@ -25,9 +24,9 @@\n DependencyGS,\n DependencyHDFS,\n DependencyHTTP,\n- DependencyLOCAL,\n DependencyS3,\n DependencySSH,\n+ # NOTE: DependencyLOCAL is the default choice\n ]\n \n DEP_MAP = {\n@@ -60,7 +59,7 @@\n for d in DEPS:\n if d.supported(p):\n return d(stage, p, info)\n- raise DvcException('Dependency \\'{}\\' is not supported'.format(p))\n+ return DependencyLOCAL(stage, p, info)\n \n \n def loadd_from(stage, d_list):\ndiff --git a/dvc/dependency/local.py b/dvc/dependency/local.py\n--- a/dvc/dependency/local.py\n+++ b/dvc/dependency/local.py\n@@ -12,7 +12,7 @@\n \n \n class DependencyLOCAL(DependencyBase):\n- REGEX = r'^(?P<path>(/+|.:\\\\+)?[^:]*)$'\n+ REGEX = r'^(?P<path>.*)$'\n \n DoesNotExistError = DependencyDoesNotExistError\n IsNotFileOrDirError = DependencyIsNotFileOrDirError\ndiff --git a/dvc/output/__init__.py b/dvc/output/__init__.py\n--- a/dvc/output/__init__.py\n+++ b/dvc/output/__init__.py\n@@ -1,6 +1,5 @@\n import schema\n \n-from dvc.exceptions import DvcException\n from dvc.config import Config\n \n from dvc.dependency import SCHEMA, urlparse\n@@ -14,7 +13,13 @@\n from dvc.remote import Remote\n \n \n-OUTS = [OutputHDFS, OutputS3, OutputGS, OutputSSH, OutputLOCAL]\n+OUTS = [\n+ OutputHDFS,\n+ OutputS3,\n+ OutputGS,\n+ OutputSSH,\n+ # NOTE: OutputLOCAL is the default choice\n+]\n \n OUTS_MAP = {'hdfs': OutputHDFS,\n 's3': OutputS3,\n@@ -42,7 +47,7 @@\n for o in OUTS:\n if o.supported(p):\n return o(stage, p, info, cache=cache, remote=None, metric=metric)\n- raise DvcException('Output \\'{}\\' is not supported'.format(p))\n+ return OutputLOCAL(stage, p, info, cache=cache, remote=None, metric=metric)\n \n \n def loadd_from(stage, d_list):\n", "issue": "Image is not supported as a dependency\nVersion: `0.22.0+f9b30a`\r\n\r\nI ran this command:\r\n```\r\ndvc run -d 2018-12-12-13:32:56.png -o derp 'echo derp > derp'\r\n```\r\n\r\nWhere the dependency is:\r\n```\r\n2018-12-12-13:32:56.png: PNG image data, 1228 x 494, 8-bit/color RGBA, non-interlaced\r\n```\r\n\r\nAnd I got this:\r\n```\r\nError: Failed to run command: Dependency '2018-12-12-13:32:56.png' is not supported\r\n```\r\n\r\nIs this intended? 
Why we aren't supporting images as dependencies?\n", "before_files": [{"content": "import schema\n\nfrom dvc.exceptions import DvcException\nfrom dvc.config import Config\n\nfrom dvc.dependency import SCHEMA, urlparse\nfrom dvc.dependency.base import DependencyBase\nfrom dvc.output.s3 import OutputS3\nfrom dvc.output.gs import OutputGS\nfrom dvc.output.local import OutputLOCAL\nfrom dvc.output.hdfs import OutputHDFS\nfrom dvc.output.ssh import OutputSSH\n\nfrom dvc.remote import Remote\n\n\nOUTS = [OutputHDFS, OutputS3, OutputGS, OutputSSH, OutputLOCAL]\n\nOUTS_MAP = {'hdfs': OutputHDFS,\n 's3': OutputS3,\n 'gs': OutputGS,\n 'ssh': OutputSSH,\n '': OutputLOCAL}\n\nSCHEMA[schema.Optional(OutputLOCAL.PARAM_CACHE)] = bool\nSCHEMA[schema.Optional(OutputLOCAL.PARAM_METRIC)] = OutputLOCAL.METRIC_SCHEMA\n\n\ndef _get(stage, p, info, cache, metric):\n parsed = urlparse(p)\n if parsed.scheme == 'remote':\n name = Config.SECTION_REMOTE_FMT.format(parsed.netloc)\n sect = stage.project.config._config[name]\n remote = Remote(stage.project, sect)\n return OUTS_MAP[remote.scheme](stage,\n p,\n info,\n cache=cache,\n remote=remote,\n metric=metric)\n\n for o in OUTS:\n if o.supported(p):\n return o(stage, p, info, cache=cache, remote=None, metric=metric)\n raise DvcException('Output \\'{}\\' is not supported'.format(p))\n\n\ndef loadd_from(stage, d_list):\n ret = []\n for d in d_list:\n p = d.pop(DependencyBase.PARAM_PATH)\n cache = d.pop(OutputLOCAL.PARAM_CACHE, True)\n metric = d.pop(OutputLOCAL.PARAM_METRIC, False)\n ret.append(_get(stage, p, info=d, cache=cache, metric=metric))\n return ret\n\n\ndef loads_from(stage, s_list, use_cache=True, metric=False):\n ret = []\n for s in s_list:\n ret.append(_get(stage, s, info={}, cache=use_cache, metric=metric))\n return ret\n", "path": "dvc/output/__init__.py"}, {"content": "import schema\n\ntry:\n from urlparse import urlparse\nexcept ImportError:\n from urllib.parse import urlparse\n\nfrom dvc.exceptions import DvcException\nfrom dvc.config import Config\n\nfrom dvc.dependency.base import DependencyBase\nfrom dvc.dependency.s3 import DependencyS3\nfrom dvc.dependency.gs import DependencyGS\nfrom dvc.dependency.local import DependencyLOCAL\nfrom dvc.dependency.hdfs import DependencyHDFS\nfrom dvc.dependency.ssh import DependencySSH\nfrom dvc.dependency.http import DependencyHTTP\n\nfrom dvc.remote import Remote\nfrom dvc.remote.local import RemoteLOCAL\nfrom dvc.remote.s3 import RemoteS3\nfrom dvc.remote.hdfs import RemoteHDFS\n\nDEPS = [\n DependencyGS,\n DependencyHDFS,\n DependencyHTTP,\n DependencyLOCAL,\n DependencyS3,\n DependencySSH,\n]\n\nDEP_MAP = {\n '': DependencyLOCAL,\n 'ssh': DependencySSH,\n 's3': DependencyS3,\n 'gs': DependencyGS,\n 'hdfs': DependencyHDFS,\n 'http': DependencyHTTP,\n 'https': DependencyHTTP,\n}\n\n# We are skipping RemoteHTTP.PARAM_ETAG because is the same as RemoteS3\nSCHEMA = {\n DependencyBase.PARAM_PATH: str,\n schema.Optional(RemoteLOCAL.PARAM_MD5): schema.Or(str, None),\n schema.Optional(RemoteS3.PARAM_ETAG): schema.Or(str, None),\n schema.Optional(RemoteHDFS.PARAM_CHECKSUM): schema.Or(str, None),\n}\n\n\ndef _get(stage, p, info):\n parsed = urlparse(p)\n if parsed.scheme == 'remote':\n name = Config.SECTION_REMOTE_FMT.format(parsed.netloc)\n sect = stage.project.config._config[name]\n remote = Remote(stage.project, sect)\n return DEP_MAP[remote.scheme](stage, p, info, remote=remote)\n\n for d in DEPS:\n if d.supported(p):\n return d(stage, p, info)\n raise DvcException('Dependency \\'{}\\' is not 
supported'.format(p))\n\n\ndef loadd_from(stage, d_list):\n ret = []\n for d in d_list:\n p = d.pop(DependencyBase.PARAM_PATH)\n ret.append(_get(stage, p, d))\n return ret\n\n\ndef loads_from(stage, s_list):\n ret = []\n for s in s_list:\n ret.append(_get(stage, s, {}))\n return ret\n", "path": "dvc/dependency/__init__.py"}, {"content": "import os\n\ntry:\n from urlparse import urlparse\nexcept ImportError:\n from urllib.parse import urlparse\n\nfrom dvc.dependency.base import DependencyBase\nfrom dvc.dependency.base import DependencyDoesNotExistError\nfrom dvc.dependency.base import DependencyIsNotFileOrDirError\nfrom dvc.remote.local import RemoteLOCAL\n\n\nclass DependencyLOCAL(DependencyBase):\n REGEX = r'^(?P<path>(/+|.:\\\\+)?[^:]*)$'\n\n DoesNotExistError = DependencyDoesNotExistError\n IsNotFileOrDirError = DependencyIsNotFileOrDirError\n\n def __init__(self, stage, path, info=None, remote=None):\n self.stage = stage\n self.project = stage.project\n self.info = info\n if remote is not None:\n self.remote = remote\n else:\n self.remote = RemoteLOCAL(stage.project, {})\n\n if remote:\n path = os.path.join(remote.prefix, urlparse(path).path.lstrip('/'))\n\n if not os.path.isabs(path):\n path = self.remote.ospath(path)\n path = os.path.join(stage.cwd, path)\n self.path = os.path.abspath(os.path.normpath(path))\n\n self.path_info = {'scheme': 'local',\n 'path': self.path}\n\n def __str__(self):\n return self.rel_path\n\n @property\n def is_local(self):\n assert os.path.isabs(self.path)\n assert os.path.isabs(self.project.root_dir)\n return self.path.startswith(self.project.root_dir)\n\n @property\n def sep(self):\n return os.sep\n\n @property\n def rel_path(self):\n return os.path.relpath(self.path)\n\n def changed(self):\n if not self.exists:\n return True\n\n info = self.remote.save_info(self.path_info)\n\n return self.info != info\n\n def save(self):\n if not self.exists:\n raise self.DoesNotExistError(self.rel_path)\n\n if not os.path.isfile(self.path) \\\n and not os.path.isdir(self.path): # pragma: no cover\n raise self.IsNotFileOrDirError(self.rel_path)\n\n if (os.path.isfile(self.path) and os.path.getsize(self.path) == 0) or \\\n (os.path.isdir(self.path) and len(os.listdir(self.path)) == 0):\n msg = \"File/directory '{}' is empty.\".format(self.rel_path)\n self.project.logger.warn(msg)\n\n self.info = self.remote.save_info(self.path_info)\n\n def dumpd(self):\n if self.is_local:\n path = self.remote.unixpath(os.path.relpath(self.path,\n self.stage.cwd))\n else:\n path = self.path\n\n info = self.info.copy()\n info[self.PARAM_PATH] = path\n return info\n", "path": "dvc/dependency/local.py"}], "after_files": [{"content": "import schema\n\nfrom dvc.config import Config\n\nfrom dvc.dependency import SCHEMA, urlparse\nfrom dvc.dependency.base import DependencyBase\nfrom dvc.output.s3 import OutputS3\nfrom dvc.output.gs import OutputGS\nfrom dvc.output.local import OutputLOCAL\nfrom dvc.output.hdfs import OutputHDFS\nfrom dvc.output.ssh import OutputSSH\n\nfrom dvc.remote import Remote\n\n\nOUTS = [\n OutputHDFS,\n OutputS3,\n OutputGS,\n OutputSSH,\n # NOTE: OutputLOCAL is the default choice\n]\n\nOUTS_MAP = {'hdfs': OutputHDFS,\n 's3': OutputS3,\n 'gs': OutputGS,\n 'ssh': OutputSSH,\n '': OutputLOCAL}\n\nSCHEMA[schema.Optional(OutputLOCAL.PARAM_CACHE)] = bool\nSCHEMA[schema.Optional(OutputLOCAL.PARAM_METRIC)] = OutputLOCAL.METRIC_SCHEMA\n\n\ndef _get(stage, p, info, cache, metric):\n parsed = urlparse(p)\n if parsed.scheme == 'remote':\n name = 
Config.SECTION_REMOTE_FMT.format(parsed.netloc)\n sect = stage.project.config._config[name]\n remote = Remote(stage.project, sect)\n return OUTS_MAP[remote.scheme](stage,\n p,\n info,\n cache=cache,\n remote=remote,\n metric=metric)\n\n for o in OUTS:\n if o.supported(p):\n return o(stage, p, info, cache=cache, remote=None, metric=metric)\n return OutputLOCAL(stage, p, info, cache=cache, remote=None, metric=metric)\n\n\ndef loadd_from(stage, d_list):\n ret = []\n for d in d_list:\n p = d.pop(DependencyBase.PARAM_PATH)\n cache = d.pop(OutputLOCAL.PARAM_CACHE, True)\n metric = d.pop(OutputLOCAL.PARAM_METRIC, False)\n ret.append(_get(stage, p, info=d, cache=cache, metric=metric))\n return ret\n\n\ndef loads_from(stage, s_list, use_cache=True, metric=False):\n ret = []\n for s in s_list:\n ret.append(_get(stage, s, info={}, cache=use_cache, metric=metric))\n return ret\n", "path": "dvc/output/__init__.py"}, {"content": "import schema\n\ntry:\n from urlparse import urlparse\nexcept ImportError:\n from urllib.parse import urlparse\n\nfrom dvc.config import Config\n\nfrom dvc.dependency.base import DependencyBase\nfrom dvc.dependency.s3 import DependencyS3\nfrom dvc.dependency.gs import DependencyGS\nfrom dvc.dependency.local import DependencyLOCAL\nfrom dvc.dependency.hdfs import DependencyHDFS\nfrom dvc.dependency.ssh import DependencySSH\nfrom dvc.dependency.http import DependencyHTTP\n\nfrom dvc.remote import Remote\nfrom dvc.remote.local import RemoteLOCAL\nfrom dvc.remote.s3 import RemoteS3\nfrom dvc.remote.hdfs import RemoteHDFS\n\nDEPS = [\n DependencyGS,\n DependencyHDFS,\n DependencyHTTP,\n DependencyS3,\n DependencySSH,\n # NOTE: DependencyLOCAL is the default choice\n]\n\nDEP_MAP = {\n '': DependencyLOCAL,\n 'ssh': DependencySSH,\n 's3': DependencyS3,\n 'gs': DependencyGS,\n 'hdfs': DependencyHDFS,\n 'http': DependencyHTTP,\n 'https': DependencyHTTP,\n}\n\n# We are skipping RemoteHTTP.PARAM_ETAG because is the same as RemoteS3\nSCHEMA = {\n DependencyBase.PARAM_PATH: str,\n schema.Optional(RemoteLOCAL.PARAM_MD5): schema.Or(str, None),\n schema.Optional(RemoteS3.PARAM_ETAG): schema.Or(str, None),\n schema.Optional(RemoteHDFS.PARAM_CHECKSUM): schema.Or(str, None),\n}\n\n\ndef _get(stage, p, info):\n parsed = urlparse(p)\n if parsed.scheme == 'remote':\n name = Config.SECTION_REMOTE_FMT.format(parsed.netloc)\n sect = stage.project.config._config[name]\n remote = Remote(stage.project, sect)\n return DEP_MAP[remote.scheme](stage, p, info, remote=remote)\n\n for d in DEPS:\n if d.supported(p):\n return d(stage, p, info)\n return DependencyLOCAL(stage, p, info)\n\n\ndef loadd_from(stage, d_list):\n ret = []\n for d in d_list:\n p = d.pop(DependencyBase.PARAM_PATH)\n ret.append(_get(stage, p, d))\n return ret\n\n\ndef loads_from(stage, s_list):\n ret = []\n for s in s_list:\n ret.append(_get(stage, s, {}))\n return ret\n", "path": "dvc/dependency/__init__.py"}, {"content": "import os\n\ntry:\n from urlparse import urlparse\nexcept ImportError:\n from urllib.parse import urlparse\n\nfrom dvc.dependency.base import DependencyBase\nfrom dvc.dependency.base import DependencyDoesNotExistError\nfrom dvc.dependency.base import DependencyIsNotFileOrDirError\nfrom dvc.remote.local import RemoteLOCAL\n\n\nclass DependencyLOCAL(DependencyBase):\n REGEX = r'^(?P<path>.*)$'\n\n DoesNotExistError = DependencyDoesNotExistError\n IsNotFileOrDirError = DependencyIsNotFileOrDirError\n\n def __init__(self, stage, path, info=None, remote=None):\n self.stage = stage\n self.project = stage.project\n self.info = 
info\n if remote is not None:\n self.remote = remote\n else:\n self.remote = RemoteLOCAL(stage.project, {})\n\n if remote:\n path = os.path.join(remote.prefix, urlparse(path).path.lstrip('/'))\n\n if not os.path.isabs(path):\n path = self.remote.ospath(path)\n path = os.path.join(stage.cwd, path)\n self.path = os.path.abspath(os.path.normpath(path))\n\n self.path_info = {'scheme': 'local',\n 'path': self.path}\n\n def __str__(self):\n return self.rel_path\n\n @property\n def is_local(self):\n assert os.path.isabs(self.path)\n assert os.path.isabs(self.project.root_dir)\n return self.path.startswith(self.project.root_dir)\n\n @property\n def sep(self):\n return os.sep\n\n @property\n def rel_path(self):\n return os.path.relpath(self.path)\n\n def changed(self):\n if not self.exists:\n return True\n\n info = self.remote.save_info(self.path_info)\n\n return self.info != info\n\n def save(self):\n if not self.exists:\n raise self.DoesNotExistError(self.rel_path)\n\n if not os.path.isfile(self.path) \\\n and not os.path.isdir(self.path): # pragma: no cover\n raise self.IsNotFileOrDirError(self.rel_path)\n\n if (os.path.isfile(self.path) and os.path.getsize(self.path) == 0) or \\\n (os.path.isdir(self.path) and len(os.listdir(self.path)) == 0):\n msg = \"File/directory '{}' is empty.\".format(self.rel_path)\n self.project.logger.warn(msg)\n\n self.info = self.remote.save_info(self.path_info)\n\n def dumpd(self):\n if self.is_local:\n path = self.remote.unixpath(os.path.relpath(self.path,\n self.stage.cwd))\n else:\n path = self.path\n\n info = self.info.copy()\n info[self.PARAM_PATH] = path\n return info\n", "path": "dvc/dependency/local.py"}]}
| 2,525 | 624 |
gh_patches_debug_21951
|
rasdani/github-patches
|
git_diff
|
nvaccess__nvda-9893
|
We are currently solving the following issue within our repository. Here is the issue text:
--- BEGIN ISSUE ---
Python 3 staging/Notepad++: obtaining font name fails because it is a bytes-like object
#9557

### Steps to reproduce:
1. Install Python 3 staging snapshot.
2. Run Notepad++ or any app that embeds Scintilla controls.
3. Try obtaining formatting info (NVDA+F).
### Actual behavior:
The following traceback is recorded:
```
ERROR - scriptHandler.executeScript (11:07:35.251):
error executing script: <bound method GlobalCommands.script_reportFormatting of <globalCommands.GlobalCommands object at 0x06B54FD0>> with gesture 'NVDA+f'
Traceback (most recent call last):
File "scriptHandler.pyc", line 189, in executeScript
File "globalCommands.pyc", line 1393, in script_reportFormatting
File "globalCommands.pyc", line 1366, in _reportFormattingHelper
File "textInfos\__init__.pyc", line 500, in getFormatFieldSpeech
File "speech\__init__.pyc", line 1717, in getFormatFieldSpeech
TypeError: sequence item 0: expected str instance, bytes found
```
### Expected behavior:
No errors.
### System configuration
#### NVDA installed/portable/running from source:
Installed and portable
#### NVDA version:
Python 3 staging 17954
#### Windows version:
Windows 10 Version 1903
#### Name and version of other software in use when reproducing the issue:
notepad++ 7.6.6
#### Other information about your system:
None
### Other questions
#### Does the issue still occur after restarting your PC?
Yes
#### Have you tried any other versions of NVDA? If so, please report their behaviors.
Yes - master snapshot, 2019.1.1.
--- END ISSUE ---
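Before looking at the files, a minimal standalone illustration (not the actual NVDA patch) of what the traceback points at: under Python 3 a ctypes string buffer yields `bytes`, which later breaks `str.join` in the speech code unless it is decoded first — in NVDA's case with the code page reported by `SCI_GETCODEPAGE` rather than the hard-coded `"utf-8"` assumed below:

```python
import ctypes

buf = ctypes.create_string_buffer(b"Courier New", 32)
print(type(buf.value))                      # <class 'bytes'> on Python 3

font_name = buf.value.decode("utf-8", errors="replace")
print(" ".join(["font-name", font_name]))   # str.join works once the value is real text
```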
Below are some code segments, each from a relevant file. One or more of these files may contain bugs.
--- BEGIN FILES ---
Path: `source/NVDAObjects/window/scintilla.py`
Content:
```
1 import ctypes
2 import IAccessibleHandler
3 import speech
4 import textInfos.offsets
5 import winKernel
6 import winUser
7 import globalVars
8 import controlTypes
9 import config
10 from . import Window
11 from .. import NVDAObjectTextInfo
12 from ..behaviors import EditableTextWithAutoSelectDetection
13 import locale
14 import watchdog
15 import eventHandler
16 import locationHelper
17 import textUtils
18
19 # Window messages
20 SCI_POSITIONFROMPOINT=2022
21 SCI_POINTXFROMPOSITION=2164
22 SCI_POINTYFROMPOSITION=2165
23 SCI_GETTEXTRANGE=2162
24 SCI_GETTEXT=2182
25 SCI_GETTEXTLENGTH=2183
26 SCI_GETLENGTH=2006
27 SCI_GETCURRENTPOS=2008
28 SCI_GETANCHOR=2009
29 SCI_GOTOPOS=2025
30 SCI_SETCURRENTPOS=2141
31 SCI_GETSELECTIONSTART=2143
32 SCI_GETSELECTIONEND=2145
33 SCI_SETSEL=2160
34 SCI_GETLINEENDPOSITION=2136
35 SCI_GETLINECOUNT=2154
36 SCI_LINEFROMPOSITION=2166
37 SCI_POSITIONFROMLINE=2167
38 SCI_LINELENGTH=2350
39 SCI_GETSTYLEAT=2010
40 SCI_STYLEGETFONT=2486
41 SCI_STYLEGETSIZE=2485
42 SCI_STYLEGETBOLD=2483
43 SCI_STYLEGETITALIC=2484
44 SCI_STYLEGETUNDERLINE=2488
45 SCI_WORDSTARTPOSITION=2266
46 SCI_WORDENDPOSITION=2267
47 SC_WRAP_NONE=0
48 SCI_GETWRAPMODE=2269
49 SCI_GETCODEPAGE=2137
50 SCI_POSITIONAFTER=2418
51
52 #constants
53 #: Represents an invalid position within a document.
54 INVALID_POSITION=-1
55 STYLE_DEFAULT=32
56 SC_CP_UTF8=65001
57
58 class CharacterRangeStruct(ctypes.Structure):
59 _fields_=[
60 ('cpMin',ctypes.c_long),
61 ('cpMax',ctypes.c_long),
62 ]
63
64 class TextRangeStruct(ctypes.Structure):
65 _fields_=[
66 ('chrg',CharacterRangeStruct),
67 ('lpstrText',ctypes.c_char_p),
68 ]
69
70 class ScintillaTextInfo(textInfos.offsets.OffsetsTextInfo):
71
72 def _get_encoding(self):
73 cp=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETCODEPAGE,0,0)
74 if cp==SC_CP_UTF8:
75 return "utf-8"
76 else:
77 return locale.getlocale()[1]
78
79 def _getOffsetFromPoint(self,x,y):
80 x, y = winUser.ScreenToClient(self.obj.windowHandle, x, y)
81 return watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONFROMPOINT,x,y)
82
83 def _getPointFromOffset(self,offset):
84 point=locationHelper.Point(
85 watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POINTXFROMPOSITION,None,offset),
86 watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POINTYFROMPOSITION,None,offset)
87 ).toScreen(self.obj.windowHandle)
88 if point.x is not None and point.y is not None:
89 return point
90 else:
91 raise NotImplementedError
92
93 def _getFormatFieldAndOffsets(self,offset,formatConfig,calculateOffsets=True):
94 style=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSTYLEAT,offset,0)
95 if calculateOffsets:
96 #we need to manually see how far the style goes, limit to line
97 lineStart,lineEnd=self._getLineOffsets(offset)
98 startOffset=offset
99 while startOffset>lineStart:
100 curStyle=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSTYLEAT,startOffset-1,0)
101 if curStyle==style:
102 startOffset-=1
103 else:
104 break
105 endOffset=offset+1
106 while endOffset<lineEnd:
107 curStyle=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSTYLEAT,endOffset,0)
108 if curStyle==style:
109 endOffset+=1
110 else:
111 break
112 else:
113 startOffset,endOffset=(self._startOffset,self._endOffset)
114 formatField=textInfos.FormatField()
115 if formatConfig["reportFontName"]:
116 #To get font name, We need to allocate memory with in Scintilla's process, and then copy it out
117 fontNameBuf=ctypes.create_string_buffer(32)
118 internalBuf=winKernel.virtualAllocEx(self.obj.processHandle,None,len(fontNameBuf),winKernel.MEM_COMMIT,winKernel.PAGE_READWRITE)
119 try:
120 watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETFONT,style, internalBuf)
121 winKernel.readProcessMemory(self.obj.processHandle,internalBuf,fontNameBuf,len(fontNameBuf),None)
122 finally:
123 winKernel.virtualFreeEx(self.obj.processHandle,internalBuf,0,winKernel.MEM_RELEASE)
124 formatField["font-name"]=fontNameBuf.value
125 if formatConfig["reportFontSize"]:
126 formatField["font-size"]="%spt"%watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETSIZE,style,0)
127 if formatConfig["reportLineNumber"]:
128 formatField["line-number"]=self._getLineNumFromOffset(offset)+1
129 if formatConfig["reportFontAttributes"]:
130 formatField["bold"]=bool(watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETBOLD,style,0))
131 formatField["italic"]=bool(watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETITALIC,style,0))
132 formatField["underline"]=bool(watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETUNDERLINE,style,0))
133 return formatField,(startOffset,endOffset)
134
135 def _getCaretOffset(self):
136 return watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETCURRENTPOS,0,0)
137
138 def _setCaretOffset(self,offset):
139 watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GOTOPOS,offset,0)
140 # #5678: A caret event sometimes doesn't get fired when we do this,
141 # so fake one just in case.
142 eventHandler.executeEvent("caret", self.obj)
143
144 def _getSelectionOffsets(self):
145 start=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSELECTIONSTART,0,0)
146 end=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSELECTIONEND,0,0)
147 return (start,end)
148
149 def _setSelectionOffsets(self,start,end):
150 watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_SETSEL,start,end)
151
152 def _getStoryText(self):
153 if not hasattr(self,'_storyText'):
154 storyLength=self._getStoryLength()
155 self._storyText=self._getTextRange(0,storyLength)
156 return self._storyText
157
158 def _getStoryLength(self):
159 if not hasattr(self,'_storyLength'):
160 self._storyLength=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETTEXTLENGTH,0,0)
161 return self._storyLength
162
163 def _getLineCount(self):
164 return watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETLINECOUNT,0,0)
165
166 def _getTextRange(self,start,end):
167 bufLen = (end - start) + 1
168 textRange = TextRangeStruct()
169 textRange.chrg.cpMin = start
170 textRange.chrg.cpMax = end
171 processHandle = self.obj.processHandle
172 internalBuf = winKernel.virtualAllocEx(processHandle, None, bufLen, winKernel.MEM_COMMIT, winKernel.PAGE_READWRITE)
173 try:
174 textRange.lpstrText = internalBuf
175 internalTextRange = winKernel.virtualAllocEx(processHandle, None, ctypes.sizeof(textRange), winKernel.MEM_COMMIT, winKernel.PAGE_READWRITE)
176 try:
177 winKernel.writeProcessMemory(processHandle, internalTextRange, ctypes.byref(textRange), ctypes.sizeof(textRange), None)
178 numBytes = watchdog.cancellableSendMessage(self.obj.windowHandle, SCI_GETTEXTRANGE, 0, internalTextRange)
179 finally:
180 winKernel.virtualFreeEx(processHandle, internalTextRange, 0, winKernel.MEM_RELEASE)
181 buf = ctypes.create_string_buffer(bufLen)
182 winKernel.readProcessMemory(processHandle, internalBuf, buf, bufLen, None)
183 finally:
184 winKernel.virtualFreeEx(processHandle, internalBuf, 0, winKernel.MEM_RELEASE)
185 return textUtils.getTextFromRawBytes(buf.raw, numChars=numBytes, encoding=self.encoding, errorsFallback="surrogateescape")
186
187 def _getWordOffsets(self,offset):
188 start=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_WORDSTARTPOSITION,offset,0)
189 end=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_WORDENDPOSITION,start,0)
190 if end<=offset:
191 start=end
192 end=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_WORDENDPOSITION,offset,0)
193 return [start,end]
194
195 def _getLineNumFromOffset(self,offset):
196 return watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_LINEFROMPOSITION,offset,0)
197
198 def _getLineOffsets(self,offset):
199 if watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETWRAPMODE,None,None)!=SC_WRAP_NONE:
200 # Lines in Scintilla refer to document lines, not wrapped lines.
201 # There's no way to retrieve wrapped lines, so use screen coordinates.
202 y = self._getPointFromOffset(offset).y
203 location=self.obj.location
204 start = self._getOffsetFromPoint(location.left, y)
205 end=self._getOffsetFromPoint(location.right, y)
206 # If this line wraps to the next line,
207 # end is the first offset of the next line.
208 if self._getPointFromOffset(end).y==y:
209 # This is the end of the document line.
210 # Include the EOL characters in the returned offsets.
211 end=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONAFTER,end,None)
212 return (start,end)
213
214 line=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_LINEFROMPOSITION,offset,0)
215 start=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONFROMLINE,line,0)
216 end=start+watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_LINELENGTH,line,0)
217 return (start,end)
218
219 def _getParagraphOffsets(self,offset):
220 return self._getLineOffsets(offset)
221
222 def _getCharacterOffsets(self,offset):
223 if offset>=self._getStoryLength(): return offset,offset+1
224 end=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONAFTER,offset,0)
225 start=offset
226 tempOffset=offset-1
227
228 while tempOffset > INVALID_POSITION:
229 start=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONAFTER,tempOffset,0)
230 if start<end:
231 break
232 elif tempOffset==0:
233 start=tempOffset
234 break
235 else:
236 tempOffset-=1
237 return [start,end]
238
239
240 #The Scintilla NVDA object, inherists the generic MSAA NVDA object
241 class Scintilla(EditableTextWithAutoSelectDetection, Window):
242
243 TextInfo=ScintillaTextInfo
244
245 #The name of the object is gotten by the standard way of getting a window name, can't use MSAA name (since it contains all the text)
246 def _get_name(self):
247 return winUser.getWindowText(self.windowHandle)
248
249 #The role of the object should be editable text
250 def _get_role(self):
251 return controlTypes.ROLE_EDITABLETEXT
252
253 def _get_states(self):
254 states = super(Scintilla, self)._get_states()
255 # Scintilla controls are always multiline.
256 states.add(controlTypes.STATE_MULTILINE)
257 return states
258
```
--- END FILES ---
Please first localize the bug based on the issue statement, and then generate a patch according to the `git diff` format fenced by three backticks.
Here is an example:
```diff
diff --git a/examples/server_async.py b/examples/server_async.py
--- a/examples/server_async.py
+++ b/examples/server_async.py
@@ -313,4 +313,4 @@
if __name__ == "__main__":
- asyncio.run(run_async_server("."), debug=True)
+ asyncio.run(run_async_server(), debug=True)
diff --git a/examples/server_sync.py b/examples/server_sync.py
--- a/examples/server_sync.py
+++ b/examples/server_sync.py
@@ -313,5 +313,5 @@
if __name__ == "__main__":
- server = run_sync_server(".")
+ server = run_sync_server()
server.shutdown()
```
|
diff --git a/source/NVDAObjects/window/scintilla.py b/source/NVDAObjects/window/scintilla.py
--- a/source/NVDAObjects/window/scintilla.py
+++ b/source/NVDAObjects/window/scintilla.py
@@ -1,3 +1,8 @@
+#A part of NonVisual Desktop Access (NVDA)
+#Copyright (C) 2009-2019 NV Access Limited, Arnold Loubriat, Babbage B.V.
+#This file is covered by the GNU General Public License.
+#See the file COPYING for more details.
+
import ctypes
import IAccessibleHandler
import speech
@@ -121,7 +126,7 @@
winKernel.readProcessMemory(self.obj.processHandle,internalBuf,fontNameBuf,len(fontNameBuf),None)
finally:
winKernel.virtualFreeEx(self.obj.processHandle,internalBuf,0,winKernel.MEM_RELEASE)
- formatField["font-name"]=fontNameBuf.value
+ formatField["font-name"]=fontNameBuf.value.decode("utf-8")
if formatConfig["reportFontSize"]:
formatField["font-size"]="%spt"%watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETSIZE,style,0)
if formatConfig["reportLineNumber"]:
|
{"golden_diff": "diff --git a/source/NVDAObjects/window/scintilla.py b/source/NVDAObjects/window/scintilla.py\n--- a/source/NVDAObjects/window/scintilla.py\n+++ b/source/NVDAObjects/window/scintilla.py\n@@ -1,3 +1,8 @@\n+#A part of NonVisual Desktop Access (NVDA)\r\n+#Copyright (C) 2009-2019 NV Access Limited, Arnold Loubriat, Babbage B.V.\r\n+#This file is covered by the GNU General Public License.\r\n+#See the file COPYING for more details.\r\n+\r\n import ctypes\r\n import IAccessibleHandler\r\n import speech\r\n@@ -121,7 +126,7 @@\n \t\t\t\twinKernel.readProcessMemory(self.obj.processHandle,internalBuf,fontNameBuf,len(fontNameBuf),None)\r\n \t\t\tfinally:\r\n \t\t\t\twinKernel.virtualFreeEx(self.obj.processHandle,internalBuf,0,winKernel.MEM_RELEASE)\r\n-\t\t\tformatField[\"font-name\"]=fontNameBuf.value\r\n+\t\t\tformatField[\"font-name\"]=fontNameBuf.value.decode(\"utf-8\")\r\n \t\tif formatConfig[\"reportFontSize\"]:\r\n \t\t\tformatField[\"font-size\"]=\"%spt\"%watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETSIZE,style,0)\r\n \t\tif formatConfig[\"reportLineNumber\"]:\n", "issue": "Python 3 staging/Notepad++: obtainig font name fails because it is a bytes-like object\n#9557 # Steps to reproduce:\r\n1. Install Python 3 staging snapshot.\r\n2. Run Notepad++ or any app that embeds Scintilla controls.\r\n3. Try obtaining formatting info (NVDA+F).\r\n\r\n### Actual behavior:\r\nThe following traceback is recorded:\r\n\r\n```\r\nERROR - scriptHandler.executeScript (11:07:35.251):\r\nerror executing script: <bound method GlobalCommands.script_reportFormatting of <globalCommands.GlobalCommands object at 0x06B54FD0>> with gesture 'NVDA+f'\r\nTraceback (most recent call last):\r\n File \"scriptHandler.pyc\", line 189, in executeScript\r\n File \"globalCommands.pyc\", line 1393, in script_reportFormatting\r\n File \"globalCommands.pyc\", line 1366, in _reportFormattingHelper\r\n File \"textInfos\\__init__.pyc\", line 500, in getFormatFieldSpeech\r\n File \"speech\\__init__.pyc\", line 1717, in getFormatFieldSpeech\r\nTypeError: sequence item 0: expected str instance, bytes found\r\n```\r\n### Expected behavior:\r\nNo errors.\r\n\r\n### System configuration\r\n#### NVDA installed/portable/running from source:\r\nInstalled and portable\r\n\r\n#### NVDA version:\r\nPython 3 staging 17954\r\n\r\n#### Windows version:\r\nWindows 10 Version 1903\r\n\r\n#### Name and version of other software in use when reproducing the issue:\r\nnotepad++ 7.6.6\r\n\r\n#### Other information about your system:\r\nNone\r\n\r\n### Other questions\r\n#### Does the issue still occur after restarting your PC?\r\nYes\r\n\r\n#### Have you tried any other versions of NVDA? If so, please report their behaviors.\r\nYes - master snapshot, 2019.1.1.\r\n\n", "before_files": [{"content": "import ctypes\r\nimport IAccessibleHandler\r\nimport speech\r\nimport textInfos.offsets\r\nimport winKernel\r\nimport winUser\r\nimport globalVars\r\nimport controlTypes\r\nimport config\r\nfrom . import Window\r\nfrom .. 
import NVDAObjectTextInfo\r\nfrom ..behaviors import EditableTextWithAutoSelectDetection\r\nimport locale\r\nimport watchdog\r\nimport eventHandler\r\nimport locationHelper\r\nimport textUtils\r\n\r\n# Window messages\r\nSCI_POSITIONFROMPOINT=2022\r\nSCI_POINTXFROMPOSITION=2164\r\nSCI_POINTYFROMPOSITION=2165\r\nSCI_GETTEXTRANGE=2162\r\nSCI_GETTEXT=2182\r\nSCI_GETTEXTLENGTH=2183\r\nSCI_GETLENGTH=2006\r\nSCI_GETCURRENTPOS=2008\r\nSCI_GETANCHOR=2009\r\nSCI_GOTOPOS=2025\r\nSCI_SETCURRENTPOS=2141\r\nSCI_GETSELECTIONSTART=2143\r\nSCI_GETSELECTIONEND=2145\r\nSCI_SETSEL=2160\r\nSCI_GETLINEENDPOSITION=2136\r\nSCI_GETLINECOUNT=2154\r\nSCI_LINEFROMPOSITION=2166\r\nSCI_POSITIONFROMLINE=2167\r\nSCI_LINELENGTH=2350\r\nSCI_GETSTYLEAT=2010\r\nSCI_STYLEGETFONT=2486\r\nSCI_STYLEGETSIZE=2485\r\nSCI_STYLEGETBOLD=2483\r\nSCI_STYLEGETITALIC=2484\r\nSCI_STYLEGETUNDERLINE=2488\r\nSCI_WORDSTARTPOSITION=2266\r\nSCI_WORDENDPOSITION=2267\r\nSC_WRAP_NONE=0\r\nSCI_GETWRAPMODE=2269\r\nSCI_GETCODEPAGE=2137\r\nSCI_POSITIONAFTER=2418\r\n\r\n#constants\r\n#: Represents an invalid position within a document.\r\nINVALID_POSITION=-1\r\nSTYLE_DEFAULT=32\r\nSC_CP_UTF8=65001\r\n\r\nclass CharacterRangeStruct(ctypes.Structure):\r\n\t_fields_=[\r\n\t\t('cpMin',ctypes.c_long),\r\n\t\t('cpMax',ctypes.c_long),\r\n\t]\r\n\r\nclass TextRangeStruct(ctypes.Structure):\r\n\t_fields_=[\r\n\t\t('chrg',CharacterRangeStruct),\r\n\t\t('lpstrText',ctypes.c_char_p),\r\n\t]\r\n\r\nclass ScintillaTextInfo(textInfos.offsets.OffsetsTextInfo):\r\n\r\n\tdef _get_encoding(self):\r\n\t\tcp=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETCODEPAGE,0,0)\r\n\t\tif cp==SC_CP_UTF8:\r\n\t\t\treturn \"utf-8\"\r\n\t\telse:\r\n\t\t\treturn locale.getlocale()[1]\r\n\r\n\tdef _getOffsetFromPoint(self,x,y):\r\n\t\tx, y = winUser.ScreenToClient(self.obj.windowHandle, x, y)\r\n\t\treturn watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONFROMPOINT,x,y)\r\n\r\n\tdef _getPointFromOffset(self,offset):\r\n\t\tpoint=locationHelper.Point(\r\n\t\t\twatchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POINTXFROMPOSITION,None,offset),\r\n\t\t\twatchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POINTYFROMPOSITION,None,offset)\r\n\t\t).toScreen(self.obj.windowHandle)\r\n\t\tif point.x is not None and point.y is not None:\r\n\t\t\treturn point\r\n\t\telse:\r\n\t\t\traise NotImplementedError\r\n\r\n\tdef _getFormatFieldAndOffsets(self,offset,formatConfig,calculateOffsets=True):\r\n\t\tstyle=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSTYLEAT,offset,0)\r\n\t\tif calculateOffsets:\r\n\t\t\t#we need to manually see how far the style goes, limit to line\r\n\t\t\tlineStart,lineEnd=self._getLineOffsets(offset)\r\n\t\t\tstartOffset=offset\r\n\t\t\twhile startOffset>lineStart:\r\n\t\t\t\tcurStyle=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSTYLEAT,startOffset-1,0)\r\n\t\t\t\tif curStyle==style:\r\n\t\t\t\t\tstartOffset-=1\r\n\t\t\t\telse:\r\n\t\t\t\t\tbreak\r\n\t\t\tendOffset=offset+1\r\n\t\t\twhile endOffset<lineEnd:\r\n\t\t\t\tcurStyle=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSTYLEAT,endOffset,0)\r\n\t\t\t\tif curStyle==style:\r\n\t\t\t\t\tendOffset+=1\r\n\t\t\t\telse:\r\n\t\t\t\t\tbreak\r\n\t\telse:\r\n\t\t\tstartOffset,endOffset=(self._startOffset,self._endOffset)\r\n\t\tformatField=textInfos.FormatField()\r\n\t\tif formatConfig[\"reportFontName\"]:\r\n\t\t\t#To get font name, We need to allocate memory with in Scintilla's process, and then copy it 
out\r\n\t\t\tfontNameBuf=ctypes.create_string_buffer(32)\r\n\t\t\tinternalBuf=winKernel.virtualAllocEx(self.obj.processHandle,None,len(fontNameBuf),winKernel.MEM_COMMIT,winKernel.PAGE_READWRITE)\r\n\t\t\ttry:\r\n\t\t\t\twatchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETFONT,style, internalBuf)\r\n\t\t\t\twinKernel.readProcessMemory(self.obj.processHandle,internalBuf,fontNameBuf,len(fontNameBuf),None)\r\n\t\t\tfinally:\r\n\t\t\t\twinKernel.virtualFreeEx(self.obj.processHandle,internalBuf,0,winKernel.MEM_RELEASE)\r\n\t\t\tformatField[\"font-name\"]=fontNameBuf.value\r\n\t\tif formatConfig[\"reportFontSize\"]:\r\n\t\t\tformatField[\"font-size\"]=\"%spt\"%watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETSIZE,style,0)\r\n\t\tif formatConfig[\"reportLineNumber\"]:\r\n\t\t\tformatField[\"line-number\"]=self._getLineNumFromOffset(offset)+1\r\n\t\tif formatConfig[\"reportFontAttributes\"]:\r\n\t\t\tformatField[\"bold\"]=bool(watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETBOLD,style,0))\r\n\t\t\tformatField[\"italic\"]=bool(watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETITALIC,style,0))\r\n\t\t\tformatField[\"underline\"]=bool(watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETUNDERLINE,style,0))\r\n\t\treturn formatField,(startOffset,endOffset)\r\n\r\n\tdef _getCaretOffset(self):\r\n\t\treturn watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETCURRENTPOS,0,0)\r\n\r\n\tdef _setCaretOffset(self,offset):\r\n\t\twatchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GOTOPOS,offset,0)\r\n\t\t# #5678: A caret event sometimes doesn't get fired when we do this,\r\n\t\t# so fake one just in case.\r\n\t\teventHandler.executeEvent(\"caret\", self.obj)\r\n\r\n\tdef _getSelectionOffsets(self):\r\n\t\tstart=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSELECTIONSTART,0,0)\r\n\t\tend=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSELECTIONEND,0,0)\r\n\t\treturn (start,end)\r\n\r\n\tdef _setSelectionOffsets(self,start,end):\r\n\t\twatchdog.cancellableSendMessage(self.obj.windowHandle,SCI_SETSEL,start,end)\r\n\r\n\tdef _getStoryText(self):\r\n\t\tif not hasattr(self,'_storyText'):\r\n\t\t\tstoryLength=self._getStoryLength()\r\n\t\t\tself._storyText=self._getTextRange(0,storyLength)\r\n\t\treturn self._storyText\r\n\r\n\tdef _getStoryLength(self):\r\n\t\tif not hasattr(self,'_storyLength'):\r\n\t\t\tself._storyLength=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETTEXTLENGTH,0,0)\r\n\t\treturn self._storyLength\r\n\r\n\tdef _getLineCount(self):\r\n\t\treturn watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETLINECOUNT,0,0)\r\n\r\n\tdef _getTextRange(self,start,end):\r\n\t\tbufLen = (end - start) + 1\r\n\t\ttextRange = TextRangeStruct()\r\n\t\ttextRange.chrg.cpMin = start\r\n\t\ttextRange.chrg.cpMax = end\r\n\t\tprocessHandle = self.obj.processHandle\r\n\t\tinternalBuf = winKernel.virtualAllocEx(processHandle, None, bufLen, winKernel.MEM_COMMIT, winKernel.PAGE_READWRITE)\r\n\t\ttry:\r\n\t\t\ttextRange.lpstrText = internalBuf\r\n\t\t\tinternalTextRange = winKernel.virtualAllocEx(processHandle, None, ctypes.sizeof(textRange), winKernel.MEM_COMMIT, winKernel.PAGE_READWRITE)\r\n\t\t\ttry:\r\n\t\t\t\twinKernel.writeProcessMemory(processHandle, internalTextRange, ctypes.byref(textRange), ctypes.sizeof(textRange), None)\r\n\t\t\t\tnumBytes = watchdog.cancellableSendMessage(self.obj.windowHandle, SCI_GETTEXTRANGE, 0, 
internalTextRange)\r\n\t\t\tfinally:\r\n\t\t\t\twinKernel.virtualFreeEx(processHandle, internalTextRange, 0, winKernel.MEM_RELEASE)\r\n\t\t\tbuf = ctypes.create_string_buffer(bufLen)\r\n\t\t\twinKernel.readProcessMemory(processHandle, internalBuf, buf, bufLen, None)\r\n\t\tfinally:\r\n\t\t\twinKernel.virtualFreeEx(processHandle, internalBuf, 0, winKernel.MEM_RELEASE)\r\n\t\treturn textUtils.getTextFromRawBytes(buf.raw, numChars=numBytes, encoding=self.encoding, errorsFallback=\"surrogateescape\")\r\n\r\n\tdef _getWordOffsets(self,offset):\r\n\t\tstart=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_WORDSTARTPOSITION,offset,0)\r\n\t\tend=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_WORDENDPOSITION,start,0)\r\n\t\tif end<=offset:\r\n\t\t\tstart=end\r\n\t\t\tend=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_WORDENDPOSITION,offset,0)\r\n\t\treturn [start,end]\r\n\r\n\tdef _getLineNumFromOffset(self,offset):\r\n\t\treturn watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_LINEFROMPOSITION,offset,0)\r\n\r\n\tdef _getLineOffsets(self,offset):\r\n\t\tif watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETWRAPMODE,None,None)!=SC_WRAP_NONE:\r\n\t\t\t# Lines in Scintilla refer to document lines, not wrapped lines.\r\n\t\t\t# There's no way to retrieve wrapped lines, so use screen coordinates.\r\n\t\t\ty = self._getPointFromOffset(offset).y\r\n\t\t\tlocation=self.obj.location\r\n\t\t\tstart = self._getOffsetFromPoint(location.left, y)\r\n\t\t\tend=self._getOffsetFromPoint(location.right, y)\r\n\t\t\t# If this line wraps to the next line,\r\n\t\t\t# end is the first offset of the next line.\r\n\t\t\tif self._getPointFromOffset(end).y==y:\r\n\t\t\t\t# This is the end of the document line.\r\n\t\t\t\t# Include the EOL characters in the returned offsets.\r\n\t\t\t\tend=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONAFTER,end,None)\r\n\t\t\treturn (start,end)\r\n\r\n\t\tline=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_LINEFROMPOSITION,offset,0)\r\n\t\tstart=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONFROMLINE,line,0)\r\n\t\tend=start+watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_LINELENGTH,line,0)\r\n\t\treturn (start,end)\r\n\r\n\tdef _getParagraphOffsets(self,offset):\r\n\t\treturn self._getLineOffsets(offset)\r\n\r\n\tdef _getCharacterOffsets(self,offset):\r\n\t\tif offset>=self._getStoryLength(): return offset,offset+1\r\n\t\tend=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONAFTER,offset,0)\r\n\t\tstart=offset\r\n\t\ttempOffset=offset-1\r\n\t\t\r\n\t\twhile tempOffset > INVALID_POSITION:\r\n\t\t\tstart=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONAFTER,tempOffset,0)\r\n\t\t\tif start<end:\r\n\t\t\t\tbreak\r\n\t\t\telif tempOffset==0:\r\n\t\t\t\tstart=tempOffset\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\ttempOffset-=1\r\n\t\treturn [start,end]\r\n\r\n\r\n#The Scintilla NVDA object, inherists the generic MSAA NVDA object\r\nclass Scintilla(EditableTextWithAutoSelectDetection, Window):\r\n\r\n\tTextInfo=ScintillaTextInfo\r\n\r\n#The name of the object is gotten by the standard way of getting a window name, can't use MSAA name (since it contains all the text)\r\n\tdef _get_name(self):\r\n\t\treturn winUser.getWindowText(self.windowHandle)\r\n\r\n#The role of the object should be editable text\r\n\tdef _get_role(self):\r\n\t\treturn controlTypes.ROLE_EDITABLETEXT\r\n\r\n\tdef _get_states(self):\r\n\t\tstates = super(Scintilla, 
self)._get_states()\r\n\t\t# Scintilla controls are always multiline.\r\n\t\tstates.add(controlTypes.STATE_MULTILINE)\r\n\t\treturn states\r\n", "path": "source/NVDAObjects/window/scintilla.py"}], "after_files": [{"content": "#A part of NonVisual Desktop Access (NVDA)\r\n#Copyright (C) 2009-2019 NV Access Limited, Arnold Loubriat, Babbage B.V.\r\n#This file is covered by the GNU General Public License.\r\n#See the file COPYING for more details.\r\n\r\nimport ctypes\r\nimport IAccessibleHandler\r\nimport speech\r\nimport textInfos.offsets\r\nimport winKernel\r\nimport winUser\r\nimport globalVars\r\nimport controlTypes\r\nimport config\r\nfrom . import Window\r\nfrom .. import NVDAObjectTextInfo\r\nfrom ..behaviors import EditableTextWithAutoSelectDetection\r\nimport locale\r\nimport watchdog\r\nimport eventHandler\r\nimport locationHelper\r\nimport textUtils\r\n\r\n# Window messages\r\nSCI_POSITIONFROMPOINT=2022\r\nSCI_POINTXFROMPOSITION=2164\r\nSCI_POINTYFROMPOSITION=2165\r\nSCI_GETTEXTRANGE=2162\r\nSCI_GETTEXT=2182\r\nSCI_GETTEXTLENGTH=2183\r\nSCI_GETLENGTH=2006\r\nSCI_GETCURRENTPOS=2008\r\nSCI_GETANCHOR=2009\r\nSCI_GOTOPOS=2025\r\nSCI_SETCURRENTPOS=2141\r\nSCI_GETSELECTIONSTART=2143\r\nSCI_GETSELECTIONEND=2145\r\nSCI_SETSEL=2160\r\nSCI_GETLINEENDPOSITION=2136\r\nSCI_GETLINECOUNT=2154\r\nSCI_LINEFROMPOSITION=2166\r\nSCI_POSITIONFROMLINE=2167\r\nSCI_LINELENGTH=2350\r\nSCI_GETSTYLEAT=2010\r\nSCI_STYLEGETFONT=2486\r\nSCI_STYLEGETSIZE=2485\r\nSCI_STYLEGETBOLD=2483\r\nSCI_STYLEGETITALIC=2484\r\nSCI_STYLEGETUNDERLINE=2488\r\nSCI_WORDSTARTPOSITION=2266\r\nSCI_WORDENDPOSITION=2267\r\nSC_WRAP_NONE=0\r\nSCI_GETWRAPMODE=2269\r\nSCI_GETCODEPAGE=2137\r\nSCI_POSITIONAFTER=2418\r\n\r\n#constants\r\n#: Represents an invalid position within a document.\r\nINVALID_POSITION=-1\r\nSTYLE_DEFAULT=32\r\nSC_CP_UTF8=65001\r\n\r\nclass CharacterRangeStruct(ctypes.Structure):\r\n\t_fields_=[\r\n\t\t('cpMin',ctypes.c_long),\r\n\t\t('cpMax',ctypes.c_long),\r\n\t]\r\n\r\nclass TextRangeStruct(ctypes.Structure):\r\n\t_fields_=[\r\n\t\t('chrg',CharacterRangeStruct),\r\n\t\t('lpstrText',ctypes.c_char_p),\r\n\t]\r\n\r\nclass ScintillaTextInfo(textInfos.offsets.OffsetsTextInfo):\r\n\r\n\tdef _get_encoding(self):\r\n\t\tcp=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETCODEPAGE,0,0)\r\n\t\tif cp==SC_CP_UTF8:\r\n\t\t\treturn \"utf-8\"\r\n\t\telse:\r\n\t\t\treturn locale.getlocale()[1]\r\n\r\n\tdef _getOffsetFromPoint(self,x,y):\r\n\t\tx, y = winUser.ScreenToClient(self.obj.windowHandle, x, y)\r\n\t\treturn watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONFROMPOINT,x,y)\r\n\r\n\tdef _getPointFromOffset(self,offset):\r\n\t\tpoint=locationHelper.Point(\r\n\t\t\twatchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POINTXFROMPOSITION,None,offset),\r\n\t\t\twatchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POINTYFROMPOSITION,None,offset)\r\n\t\t).toScreen(self.obj.windowHandle)\r\n\t\tif point.x is not None and point.y is not None:\r\n\t\t\treturn point\r\n\t\telse:\r\n\t\t\traise NotImplementedError\r\n\r\n\tdef _getFormatFieldAndOffsets(self,offset,formatConfig,calculateOffsets=True):\r\n\t\tstyle=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSTYLEAT,offset,0)\r\n\t\tif calculateOffsets:\r\n\t\t\t#we need to manually see how far the style goes, limit to line\r\n\t\t\tlineStart,lineEnd=self._getLineOffsets(offset)\r\n\t\t\tstartOffset=offset\r\n\t\t\twhile 
startOffset>lineStart:\r\n\t\t\t\tcurStyle=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSTYLEAT,startOffset-1,0)\r\n\t\t\t\tif curStyle==style:\r\n\t\t\t\t\tstartOffset-=1\r\n\t\t\t\telse:\r\n\t\t\t\t\tbreak\r\n\t\t\tendOffset=offset+1\r\n\t\t\twhile endOffset<lineEnd:\r\n\t\t\t\tcurStyle=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSTYLEAT,endOffset,0)\r\n\t\t\t\tif curStyle==style:\r\n\t\t\t\t\tendOffset+=1\r\n\t\t\t\telse:\r\n\t\t\t\t\tbreak\r\n\t\telse:\r\n\t\t\tstartOffset,endOffset=(self._startOffset,self._endOffset)\r\n\t\tformatField=textInfos.FormatField()\r\n\t\tif formatConfig[\"reportFontName\"]:\r\n\t\t\t#To get font name, We need to allocate memory with in Scintilla's process, and then copy it out\r\n\t\t\tfontNameBuf=ctypes.create_string_buffer(32)\r\n\t\t\tinternalBuf=winKernel.virtualAllocEx(self.obj.processHandle,None,len(fontNameBuf),winKernel.MEM_COMMIT,winKernel.PAGE_READWRITE)\r\n\t\t\ttry:\r\n\t\t\t\twatchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETFONT,style, internalBuf)\r\n\t\t\t\twinKernel.readProcessMemory(self.obj.processHandle,internalBuf,fontNameBuf,len(fontNameBuf),None)\r\n\t\t\tfinally:\r\n\t\t\t\twinKernel.virtualFreeEx(self.obj.processHandle,internalBuf,0,winKernel.MEM_RELEASE)\r\n\t\t\tformatField[\"font-name\"]=fontNameBuf.value.decode(\"utf-8\")\r\n\t\tif formatConfig[\"reportFontSize\"]:\r\n\t\t\tformatField[\"font-size\"]=\"%spt\"%watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETSIZE,style,0)\r\n\t\tif formatConfig[\"reportLineNumber\"]:\r\n\t\t\tformatField[\"line-number\"]=self._getLineNumFromOffset(offset)+1\r\n\t\tif formatConfig[\"reportFontAttributes\"]:\r\n\t\t\tformatField[\"bold\"]=bool(watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETBOLD,style,0))\r\n\t\t\tformatField[\"italic\"]=bool(watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETITALIC,style,0))\r\n\t\t\tformatField[\"underline\"]=bool(watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_STYLEGETUNDERLINE,style,0))\r\n\t\treturn formatField,(startOffset,endOffset)\r\n\r\n\tdef _getCaretOffset(self):\r\n\t\treturn watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETCURRENTPOS,0,0)\r\n\r\n\tdef _setCaretOffset(self,offset):\r\n\t\twatchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GOTOPOS,offset,0)\r\n\t\t# #5678: A caret event sometimes doesn't get fired when we do this,\r\n\t\t# so fake one just in case.\r\n\t\teventHandler.executeEvent(\"caret\", self.obj)\r\n\r\n\tdef _getSelectionOffsets(self):\r\n\t\tstart=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSELECTIONSTART,0,0)\r\n\t\tend=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETSELECTIONEND,0,0)\r\n\t\treturn (start,end)\r\n\r\n\tdef _setSelectionOffsets(self,start,end):\r\n\t\twatchdog.cancellableSendMessage(self.obj.windowHandle,SCI_SETSEL,start,end)\r\n\r\n\tdef _getStoryText(self):\r\n\t\tif not hasattr(self,'_storyText'):\r\n\t\t\tstoryLength=self._getStoryLength()\r\n\t\t\tself._storyText=self._getTextRange(0,storyLength)\r\n\t\treturn self._storyText\r\n\r\n\tdef _getStoryLength(self):\r\n\t\tif not hasattr(self,'_storyLength'):\r\n\t\t\tself._storyLength=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETTEXTLENGTH,0,0)\r\n\t\treturn self._storyLength\r\n\r\n\tdef _getLineCount(self):\r\n\t\treturn watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETLINECOUNT,0,0)\r\n\r\n\tdef _getTextRange(self,start,end):\r\n\t\tbufLen = (end - start) + 1\r\n\t\ttextRange 
= TextRangeStruct()\r\n\t\ttextRange.chrg.cpMin = start\r\n\t\ttextRange.chrg.cpMax = end\r\n\t\tprocessHandle = self.obj.processHandle\r\n\t\tinternalBuf = winKernel.virtualAllocEx(processHandle, None, bufLen, winKernel.MEM_COMMIT, winKernel.PAGE_READWRITE)\r\n\t\ttry:\r\n\t\t\ttextRange.lpstrText = internalBuf\r\n\t\t\tinternalTextRange = winKernel.virtualAllocEx(processHandle, None, ctypes.sizeof(textRange), winKernel.MEM_COMMIT, winKernel.PAGE_READWRITE)\r\n\t\t\ttry:\r\n\t\t\t\twinKernel.writeProcessMemory(processHandle, internalTextRange, ctypes.byref(textRange), ctypes.sizeof(textRange), None)\r\n\t\t\t\tnumBytes = watchdog.cancellableSendMessage(self.obj.windowHandle, SCI_GETTEXTRANGE, 0, internalTextRange)\r\n\t\t\tfinally:\r\n\t\t\t\twinKernel.virtualFreeEx(processHandle, internalTextRange, 0, winKernel.MEM_RELEASE)\r\n\t\t\tbuf = ctypes.create_string_buffer(bufLen)\r\n\t\t\twinKernel.readProcessMemory(processHandle, internalBuf, buf, bufLen, None)\r\n\t\tfinally:\r\n\t\t\twinKernel.virtualFreeEx(processHandle, internalBuf, 0, winKernel.MEM_RELEASE)\r\n\t\treturn textUtils.getTextFromRawBytes(buf.raw, numChars=numBytes, encoding=self.encoding, errorsFallback=\"surrogateescape\")\r\n\r\n\tdef _getWordOffsets(self,offset):\r\n\t\tstart=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_WORDSTARTPOSITION,offset,0)\r\n\t\tend=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_WORDENDPOSITION,start,0)\r\n\t\tif end<=offset:\r\n\t\t\tstart=end\r\n\t\t\tend=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_WORDENDPOSITION,offset,0)\r\n\t\treturn [start,end]\r\n\r\n\tdef _getLineNumFromOffset(self,offset):\r\n\t\treturn watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_LINEFROMPOSITION,offset,0)\r\n\r\n\tdef _getLineOffsets(self,offset):\r\n\t\tif watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_GETWRAPMODE,None,None)!=SC_WRAP_NONE:\r\n\t\t\t# Lines in Scintilla refer to document lines, not wrapped lines.\r\n\t\t\t# There's no way to retrieve wrapped lines, so use screen coordinates.\r\n\t\t\ty = self._getPointFromOffset(offset).y\r\n\t\t\tlocation=self.obj.location\r\n\t\t\tstart = self._getOffsetFromPoint(location.left, y)\r\n\t\t\tend=self._getOffsetFromPoint(location.right, y)\r\n\t\t\t# If this line wraps to the next line,\r\n\t\t\t# end is the first offset of the next line.\r\n\t\t\tif self._getPointFromOffset(end).y==y:\r\n\t\t\t\t# This is the end of the document line.\r\n\t\t\t\t# Include the EOL characters in the returned offsets.\r\n\t\t\t\tend=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONAFTER,end,None)\r\n\t\t\treturn (start,end)\r\n\r\n\t\tline=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_LINEFROMPOSITION,offset,0)\r\n\t\tstart=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONFROMLINE,line,0)\r\n\t\tend=start+watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_LINELENGTH,line,0)\r\n\t\treturn (start,end)\r\n\r\n\tdef _getParagraphOffsets(self,offset):\r\n\t\treturn self._getLineOffsets(offset)\r\n\r\n\tdef _getCharacterOffsets(self,offset):\r\n\t\tif offset>=self._getStoryLength(): return offset,offset+1\r\n\t\tend=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONAFTER,offset,0)\r\n\t\tstart=offset\r\n\t\ttempOffset=offset-1\r\n\t\t\r\n\t\twhile tempOffset > INVALID_POSITION:\r\n\t\t\tstart=watchdog.cancellableSendMessage(self.obj.windowHandle,SCI_POSITIONAFTER,tempOffset,0)\r\n\t\t\tif start<end:\r\n\t\t\t\tbreak\r\n\t\t\telif 
tempOffset==0:\r\n\t\t\t\tstart=tempOffset\r\n\t\t\t\tbreak\r\n\t\t\telse:\r\n\t\t\t\ttempOffset-=1\r\n\t\treturn [start,end]\r\n\r\n\r\n#The Scintilla NVDA object, inherists the generic MSAA NVDA object\r\nclass Scintilla(EditableTextWithAutoSelectDetection, Window):\r\n\r\n\tTextInfo=ScintillaTextInfo\r\n\r\n#The name of the object is gotten by the standard way of getting a window name, can't use MSAA name (since it contains all the text)\r\n\tdef _get_name(self):\r\n\t\treturn winUser.getWindowText(self.windowHandle)\r\n\r\n#The role of the object should be editable text\r\n\tdef _get_role(self):\r\n\t\treturn controlTypes.ROLE_EDITABLETEXT\r\n\r\n\tdef _get_states(self):\r\n\t\tstates = super(Scintilla, self)._get_states()\r\n\t\t# Scintilla controls are always multiline.\r\n\t\tstates.add(controlTypes.STATE_MULTILINE)\r\n\t\treturn states\r\n", "path": "source/NVDAObjects/window/scintilla.py"}]}
| 3,991 | 276 |